#!/usr/bin/env lua
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
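-- strip leading and trailing whitespace from a string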
local function trim(s)
return (s:gsub("^%s*(.-)%s*$", "%1"))
end
-- Note: the `execute_cmd` return value ends with a trailing line break;
-- it is recommended to pass it through `trim` before use.
local function execute_cmd(cmd)
local t, err = io.popen(cmd)
if not t then
return nil, "failed to execute command: " .. cmd .. ", error info:" .. err
end
local data = t:read("*all")
t:close()
return data
end
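-- prepare a world-writable directory; its name suggests it is intended to
-- collect nginx worker core dumps (see worker_rlimit_core in the template below)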
execute_cmd("install -d -m 777 /tmp/apisix_cores/")
local pkg_cpath_org = package.cpath
local pkg_path_org = package.path
local apisix_home = "/usr/local/apisix"
local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
.. apisix_home .. "/deps/lib/lua/5.1/?.so;;"
local pkg_path = apisix_home .. "/deps/share/lua/5.1/?.lua;;"
local min_etcd_version = "3.4.0"
-- only for developer, use current folder as working space
local is_root_path = false
local script_path = arg[0]
if script_path:sub(1, 2) == './' then
apisix_home = trim(execute_cmd("pwd"))
if not apisix_home then
error("failed to fetch current path")
end
if string.match(apisix_home .. "/", '^/root/') then
is_root_path = true
end
pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
.. apisix_home .. "/deps/lib/lua/5.1/?.so;"
pkg_path = apisix_home .. "/?/init.lua;"
.. apisix_home .. "/deps/share/lua/5.1/?.lua;;"
end
-- print("apisix_home: ", apisix_home)
package.cpath = pkg_cpath .. pkg_cpath_org
package.path = pkg_path .. pkg_path_org
do
-- this check is skipped under LuaJIT/openresty ("table.new" is LuaJIT-only);
-- on a plain Lua interpreter a standalone cjson may shadow openresty's bundled one
local ok = pcall(require, "table.new")
if not ok then
local ok, json = pcall(require, "cjson")
if ok and json then
io.stderr:write("please remove the cjson library in Lua, it may "
.. "conflict with the cjson library in openresty. "
.. "\n luarocks remove cjson\n")
return
end
end
end
local yaml = require("tinyyaml")
local template = require("resty.template")
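-- nginx.conf template rendered with lua-resty-template below:
-- {* expr *} interpolates a value, {% lua %} embeds Lua control flow.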
local ngx_tpl = [=[
# Configuration File - Nginx Server Configs
# This is a read-only file, do not try to modify it.
master_process on;
worker_processes {* worker_processes *};
{% if os_name == "Linux" then %}
worker_cpu_affinity auto;
{% end %}
error_log {* error_log *} {* error_log_level or "warn" *};
pid logs/nginx.pid;
worker_rlimit_nofile {* worker_rlimit_nofile *};
events {
accept_mutex off;
worker_connections {* event.worker_connections *};
}
worker_rlimit_core {* worker_rlimit_core *};
worker_shutdown_timeout {* worker_shutdown_timeout *};
env APISIX_PROFILE;
{% if envs then %}
{% for _, name in ipairs(envs) do %}
env {*name*};
{% end %}
{% end %}
{% if stream_proxy then %}
stream {
lua_package_path "$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
lua_socket_log_errors off;
lua_shared_dict lrucache-lock-stream 10m;
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} valid={*dns_resolver_valid*};
resolver_timeout {*resolver_timeout*};
upstream apisix_backend {
server 127.0.0.1:80;
balancer_by_lua_block {
apisix.stream_balancer_phase()
}
}
init_by_lua_block {
require "resty.core"
apisix = require("apisix")
apisix.stream_init()
}
init_worker_by_lua_block {
apisix.stream_init_worker()
}
server {
{% for _, port in ipairs(stream_proxy.tcp or {}) do %}
listen {*port*} {% if enable_reuseport then %} reuseport {% end %} {% if proxy_protocol and proxy_protocol.enable_tcp_pp then %} proxy_protocol {% end %};
{% end %}
{% for _, port in ipairs(stream_proxy.udp or {}) do %}
listen {*port*} udp {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if proxy_protocol and proxy_protocol.enable_tcp_pp_to_upstream then %}
proxy_protocol on;
{% end %}
preread_by_lua_block {
apisix.stream_preread_phase()
}
proxy_pass apisix_backend;
log_by_lua_block {
apisix.stream_log_phase()
}
}
}
{% end %}
http {
lua_package_path "$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
lua_shared_dict plugin-limit-req 10m;
lua_shared_dict plugin-limit-count 10m;
lua_shared_dict prometheus-metrics 10m;
lua_shared_dict plugin-limit-conn 10m;
lua_shared_dict upstream-healthcheck 10m;
lua_shared_dict worker-events 10m;
lua_shared_dict lrucache-lock 10m;
lua_shared_dict skywalking-tracing-buffer 100m;
lua_shared_dict balancer_ewma 10m;
lua_shared_dict balancer_ewma_locks 10m;
lua_shared_dict balancer_ewma_last_touched_at 10m;
# for openid-connect plugin
lua_shared_dict discovery 1m; # cache for discovery metadata documents
lua_shared_dict jwks 1m; # cache for JWKs
lua_shared_dict introspection 10m; # cache for JWT verification results
# for custom shared dict
{% if http.lua_shared_dicts then %}
{% for cache_key, cache_size in pairs(http.lua_shared_dicts) do %}
lua_shared_dict {*cache_key*} {*cache_size*};
{% end %}
{% end %}
{% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
{% for _, cache in ipairs(proxy_cache.zones) do %}
proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *};
{% end %}
{% end %}
{% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
map $upstream_cache_zone $upstream_cache_zone_info {
{% for _, cache in ipairs(proxy_cache.zones) do %}
{* cache.name *} {* cache.disk_path *},{* cache.cache_levels *};
{% end %}
}
{% end %}
lua_ssl_verify_depth 5;
ssl_session_timeout 86400;
{% if http.underscores_in_headers then %}
underscores_in_headers {* http.underscores_in_headers *};
{%end%}
lua_socket_log_errors off;
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} valid={*dns_resolver_valid*};
resolver_timeout {*resolver_timeout*};
lua_http10_buffering off;
lua_regex_match_limit 100000;
lua_regex_cache_max_entries 8192;
log_format main '{* http.access_log_format *}';
access_log {* http.access_log *} main buffer=16384 flush=3;
open_file_cache max=1000 inactive=60;
client_max_body_size {* http.client_max_body_size *};
keepalive_timeout {* http.keepalive_timeout *};
client_header_timeout {* http.client_header_timeout *};
client_body_timeout {* http.client_body_timeout *};
send_timeout {* http.send_timeout *};
server_tokens off;
include mime.types;
charset utf-8;
{% if real_ip_header then %}
real_ip_header {* real_ip_header *};
{% print("\nDeprecated: apisix.real_ip_header has been moved to nginx_config.http.real_ip_header. apisix.real_ip_header will be removed in the future version. Please use nginx_config.http.real_ip_header first.\n\n") %}
{% elseif http.real_ip_header then %}
real_ip_header {* http.real_ip_header *};
{% end %}
{% if real_ip_from then %}
{% print("\nDeprecated: apisix.real_ip_from has been moved to nginx_config.http.real_ip_from. apisix.real_ip_from will be removed in the future version. Please use nginx_config.http.real_ip_from first.\n\n") %}
{% for _, real_ip in ipairs(real_ip_from) do %}
set_real_ip_from {*real_ip*};
{% end %}
{% elseif http.real_ip_from then %}
{% for _, real_ip in ipairs(http.real_ip_from) do %}
set_real_ip_from {*real_ip*};
{% end %}
{% end %}
upstream apisix_backend {
server 0.0.0.1;
balancer_by_lua_block {
apisix.http_balancer_phase()
}
keepalive 320;
}
init_by_lua_block {
require "resty.core"
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.http_init(args)
}
init_worker_by_lua_block {
apisix.http_init_worker()
}
{% if enable_admin and port_admin then %}
server {
{%if https_admin then%}
listen {* port_admin *} ssl;
{%if admin_api_mtls and admin_api_mtls.admin_ssl_cert and admin_api_mtls.admin_ssl_cert ~= "" and
admin_api_mtls.admin_ssl_cert_key and admin_api_mtls.admin_ssl_cert_key ~= "" and
admin_api_mtls.admin_ssl_ca_cert and admin_api_mtls.admin_ssl_ca_cert ~= ""
then%}
ssl_verify_client on;
ssl_certificate {* admin_api_mtls.admin_ssl_cert *};
ssl_certificate_key {* admin_api_mtls.admin_ssl_cert_key *};
ssl_client_certificate {* admin_api_mtls.admin_ssl_ca_cert *};
{% else %}
ssl_certificate cert/apisix_admin_ssl.crt;
ssl_certificate_key cert/apisix_admin_ssl.key;
{%end%}
ssl_session_cache shared:SSL:20m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% else %}
listen {* port_admin *};
{%end%}
log_not_found off;
location /apisix/admin {
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%end%}
content_by_lua_block {
apisix.http_admin()
}
}
location /apisix/dashboard {
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%end%}
alias dashboard/;
try_files $uri $uri/index.html /index.html =404;
}
location =/robots.txt {
return 200 'User-agent: *\nDisallow: /';
}
}
{% end %}
server {
listen {* node_listen *} {% if enable_reuseport then %} reuseport {% end %};
{% if ssl.enable then %}
listen {* ssl.listen_port *} ssl {% if ssl.enable_http2 then %} http2 {% end %} {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if proxy_protocol and proxy_protocol.listen_http_port then %}
listen {* proxy_protocol.listen_http_port *} proxy_protocol;
{% end %}
{% if proxy_protocol and proxy_protocol.listen_https_port then %}
listen {* proxy_protocol.listen_https_port *} ssl {% if ssl.enable_http2 then %} http2 {% end %} proxy_protocol;
{% end %}
{% if enable_ipv6 then %}
listen [::]:{* node_listen *} {% if enable_reuseport then %} reuseport {% end %};
{% if ssl.enable then %}
listen [::]:{* ssl.listen_port *} ssl {% if ssl.enable_http2 then %} http2 {% end %} {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% end %} {% -- if enable_ipv6 %}
ssl_certificate cert/apisix.crt;
ssl_certificate_key cert/apisix.key;
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 10m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if with_module_status then %}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
access_log off;
stub_status;
}
{% end %}
{% if enable_admin and not port_admin then %}
location /apisix/admin {
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%end%}
content_by_lua_block {
apisix.http_admin()
}
}
location /apisix/dashboard {
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%end%}
alias dashboard/;
try_files $uri $uri/index.html /index.html =404;
}
{% end %}
ssl_certificate_by_lua_block {
apisix.http_ssl_phase()
}
location / {
set $upstream_mirror_host '';
set $upstream_scheme 'http';
set $upstream_host $host;
set $upstream_upgrade '';
set $upstream_connection '';
set $upstream_uri '';
access_by_lua_block {
apisix.http_access_phase()
}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_set_header Upgrade $upstream_upgrade;
proxy_set_header Connection $upstream_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass_header Server;
proxy_pass_header Date;
### the following x-forwarded-* headers are sent to the upstream server
set $var_x_forwarded_for $remote_addr;
set $var_x_forwarded_proto $scheme;
set $var_x_forwarded_host $host;
set $var_x_forwarded_port $server_port;
if ($http_x_forwarded_for != "") {
set $var_x_forwarded_for "${http_x_forwarded_for}, ${realip_remote_addr}";
}
if ($http_x_forwarded_proto != "") {
set $var_x_forwarded_proto $http_x_forwarded_proto;
}
if ($http_x_forwarded_host != "") {
set $var_x_forwarded_host $http_x_forwarded_host;
}
if ($http_x_forwarded_port != "") {
set $var_x_forwarded_port $http_x_forwarded_port;
}
proxy_set_header X-Forwarded-For $var_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
{% if enabled_plugins["proxy-cache"] then %}
### the following configuration caches response content from the upstream server
set $upstream_cache_zone off;
set $upstream_cache_key '';
set $upstream_cache_bypass '';
set $upstream_no_cache '';
set $upstream_hdr_expires '';
set $upstream_hdr_cache_control '';
proxy_cache $upstream_cache_zone;
proxy_cache_valid any {% if proxy_cache.cache_ttl then %} {* proxy_cache.cache_ttl *} {% else %} 10s {% end %};
proxy_cache_min_uses 1;
proxy_cache_methods GET HEAD;
proxy_cache_lock_timeout 5s;
proxy_cache_use_stale off;
proxy_cache_key $upstream_cache_key;
proxy_no_cache $upstream_no_cache;
proxy_cache_bypass $upstream_cache_bypass;
proxy_hide_header Cache-Control;
proxy_hide_header Expires;
add_header Cache-Control $upstream_hdr_cache_control;
add_header Expires $upstream_hdr_expires;
add_header Apisix-Cache-Status $upstream_cache_status always;
{% end %}
proxy_pass $upstream_scheme://apisix_backend$upstream_uri;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
location @grpc_pass {
access_by_lua_block {
apisix.grpc_access_phase()
}
grpc_set_header Content-Type application/grpc;
grpc_socket_keepalive on;
grpc_pass grpc://apisix_backend;
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror {
internal;
if ($upstream_mirror_host = "") {
return 200;
}
proxy_pass $upstream_mirror_host$request_uri;
}
{% end %}
}
}
]=]
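-- overwrite file_path with data; returns true, or false plus an error message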
local function write_file(file_path, data)
local file, err = io.open(file_path, "w+")
if not file then
return false, "failed to open file: " .. file_path .. ", error info:" .. err
end
file:write(data)
file:close()
return true
end
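-- read the whole file; returns its content, or false plus an error message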
local function read_file(file_path)
local file, err = io.open(file_path, "rb")
if not file then
return false, "failed to open file: " .. file_path .. ", error info:" .. err
end
local data = file:read("*all")
file:close()
return data
end
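-- a YAML line counts as empty when it is blank or contains only a comment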
local function is_empty_yaml_line(line)
return line == '' or string.find(line, '^%s*$') or
string.find(line, '^%s*#')
end
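-- treat a table as an array when the length operator covers every key in it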
local function tab_is_array(t)
local count = 0
for k,v in pairs(t) do
count = count + 1
end
return #t == count
end
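-- recursively merge the user config into the default config; array values
-- replace the default as a whole, e.g.
-- merge_conf({a = {x = 1, y = 2}}, {a = {y = 3}, b = 4}) -> {a = {x = 1, y = 3}, b = 4}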
local function merge_conf(base, new_tab)
for key, val in pairs(new_tab) do
if type(val) == "table" then
if tab_is_array(val) then
base[key] = val
else
merge_conf(base[key], val)
end
else
base[key] = val
end
end
return base
end
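-- load the default yaml config and overlay the user's config.yaml on top of it
-- (the overlay is skipped when the user file is effectively empty)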
local function read_yaml_conf()
local profile = require("apisix.core.profile")
profile.apisix_home = apisix_home .. "/"
local local_conf_path = profile:yaml_path("config-default")
local default_conf_yaml, err = read_file(local_conf_path)
if not default_conf_yaml then
return nil, err
end
local default_conf = yaml.parse(default_conf_yaml)
if not default_conf then
return nil, "invalid config-default.yaml file"
end
local_conf_path = profile:yaml_path("config")
local user_conf_yaml, err = read_file(local_conf_path)
if not user_conf_yaml then
return nil, err
end
local is_empty_file = true
for line in string.gmatch(user_conf_yaml .. '\n', '(.-)\r?\n') do
if not is_empty_yaml_line(line) then
is_empty_file = false
break
end
end
if not is_empty_file then
local user_conf = yaml.parse(user_conf_yaml)
if not user_conf then
return nil, "invalid config.yaml file"
end
merge_conf(default_conf, user_conf)
end
return default_conf
end
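-- parse the version number out of `openresty -v` output, accepting both the
-- "openresty/" and the plain "nginx/" banner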
local function get_openresty_version()
local str = "nginx version: openresty/"
local ret = execute_cmd("openresty -v 2>&1")
local pos = string.find(ret,str)
if pos then
return string.sub(ret, pos + string.len(str))
end
str = "nginx version: nginx/"
ret = execute_cmd("openresty -v 2>&1")
pos = string.find(ret, str)
if pos then
return string.sub(ret, pos + string.len(str))
end
return nil
end
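-- detect a 32-bit runtime, preferring the LuaJIT FFI ABI check and falling
-- back to `getconf LONG_BIT`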
local function is_32bit_arch()
local ok, ffi = pcall(require, "ffi")
if ok then
-- LuaJIT
return ffi.abi("32bit")
end
local ret = execute_cmd("getconf LONG_BIT")
local bits = tonumber(ret)
return bits <= 32
end
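-- split a string on the given separator (default ":"), e.g.
-- split("1.15.8", ".") -> {"1", "15", "8"}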
local function split(self, sep)
local sep, fields = sep or ":", {}
local pattern = string.format("([^%s]+)", sep)
self:gsub(pattern, function(c) fields[#fields + 1] = c end)
return fields
end
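-- parse "major.minor.patch" (an optional "-suffix" part is ignored), e.g.
-- parse_semantic_version("3.4.0-rc1") -> {major = 3, minor = 4, patch = 0}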
local function parse_semantic_version(ver)
local errmsg = "invalid semantic version: " .. ver
local parts = split(ver, "-")
if #parts > 2 then
return nil, errmsg
end
if #parts == 2 then
ver = parts[1]
end
local fields = split(ver, ".")
if #fields ~= 3 then
return nil, errmsg
end
local major = tonumber(fields[1])
local minor = tonumber(fields[2])
local patch = tonumber(fields[3])
if not (major and minor and patch) then
return nil, errmsg
end
return {
major = major,
minor = minor,
patch = patch,
}
end
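-- return true when v1 is strictly lower than v2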
local function compare_semantic_version(v1, v2)
local ver1, err = parse_semantic_version(v1)
if not ver1 then
return nil, err
end
local ver2, err = parse_semantic_version(v2)
if not ver2 then
return nil, err
end
if ver1.major ~= ver2.major then
return ver1.major < ver2.major
end
if ver1.minor ~= ver2.minor then
return ver1.minor < ver2.minor
end
return ver1.patch < ver2.patch
end
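-- return true when cur_ver_s >= need_ver_s, comparing dot-separated numeric fields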
local function check_version(cur_ver_s, need_ver_s)
local cur_vers = split(cur_ver_s, [[.]])
local need_vers = split(need_ver_s, [[.]])
local len = math.max(#cur_vers, #need_vers)
for i = 1, len do
local cur_ver = tonumber(cur_vers[i]) or 0
local need_ver = tonumber(need_vers[i]) or 0
if cur_ver > need_ver then
return true
end
if cur_ver < need_ver then
return false
end
end
return true
end
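-- collect the IPv4 nameserver addresses from the given resolv.conf-style file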
local function local_dns_resolver(file_path)
local file, err = io.open(file_path, "rb")
if not file then
return false, "failed to open file: " .. file_path .. ", error info:" .. err
end
local dns_addrs = {}
for line in file:lines() do
local addr, n = line:gsub("^nameserver%s+(%d+%.%d+%.%d+%.%d+)%s*$", "%1")
if n == 1 then
table.insert(dns_addrs, addr)
end
end
file:close()
return dns_addrs
end
local _M = {version = 0.1}
function _M.help()
print([[
Usage: apisix [action] <argument>

help:       show this message, then exit
init:       initialize the local nginx.conf
init_etcd:  initialize the data of etcd
start:      start the apisix server
stop:       stop the apisix server
restart:    restart the apisix server
reload:     reload the apisix server
version:    print the version of apisix
]])
end
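-- validate the yaml config and render conf/nginx.conf from the template above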
local function init()
if is_root_path then
print('Warning! Running APISIX under /root is only suitable for development'
.. ' and can be dangerous. It is recommended to run APISIX in a directory other than /root.')
end
-- read_yaml_conf
local yaml_conf, err = read_yaml_conf()
if not yaml_conf then
error("failed to read local yaml config of apisix: " .. err)
end
-- print("etcd: ", yaml_conf.etcd.host)
local or_ver = execute_cmd("openresty -V 2>&1")
local with_module_status = true
if or_ver and not or_ver:find("http_stub_status_module", 1, true) then
io.stderr:write("'http_stub_status_module' module is missing in ",
"your openresty, please check it out. Without this ",
"module, there will be fewer monitoring indicators.\n")
with_module_status = false
end
local enabled_plugins = {}
for i, name in ipairs(yaml_conf.plugins) do
enabled_plugins[name] = true
end
if enabled_plugins["proxy-cache"] and not yaml_conf.apisix.proxy_cache then
error("missing apisix.proxy_cache for plugin proxy-cache")
end
-- Using template.render
local sys_conf = {
lua_path = pkg_path_org,
lua_cpath = pkg_cpath_org,
os_name = trim(execute_cmd("uname")),
apisix_lua_home = apisix_home,
with_module_status = with_module_status,
error_log = {level = "warn"},
enabled_plugins = enabled_plugins,
}
if not yaml_conf.apisix then
error("failed to read `apisix` field from yaml file")
end
if not yaml_conf.nginx_config then
error("failed to read `nginx_config` field from yaml file")
end
if is_32bit_arch() then
sys_conf["worker_rlimit_core"] = "4G"
else
sys_conf["worker_rlimit_core"] = "16G"
end
for k,v in pairs(yaml_conf.apisix) do
sys_conf[k] = v
end
for k,v in pairs(yaml_conf.nginx_config) do
sys_conf[k] = v
end
local wrn = sys_conf["worker_rlimit_nofile"]
local wc = sys_conf["event"]["worker_connections"]
if not wrn or wrn <= wc then
-- ensure the number of fds is slightly larger than the number of conn
sys_conf["worker_rlimit_nofile"] = wc + 128
end
if(sys_conf["enable_dev_mode"] == true) then
sys_conf["worker_processes"] = 1
sys_conf["enable_reuseport"] = false
elseif tonumber(sys_conf["worker_processes"]) == nil then
sys_conf["worker_processes"] = "auto"
end
local dns_resolver = sys_conf["dns_resolver"]
if not dns_resolver or #dns_resolver == 0 then
local dns_addrs, err = local_dns_resolver("/etc/resolv.conf")
if not dns_addrs then
error("failed to import local DNS: " .. err)
end
if #dns_addrs == 0 then
error("local DNS is empty")
end
sys_conf["dns_resolver"] = dns_addrs
end
local conf_render = template.compile(ngx_tpl)
local ngxconf = conf_render(sys_conf)
local ok, err = write_file(apisix_home .. "/conf/nginx.conf", ngxconf)
if not ok then
error("failed to update nginx.conf: " .. err)
end
local op_ver = get_openresty_version()
if op_ver == nil then
io.stderr:write("can not find openresty\n")
return
end
local need_ver = "1.15.8"
if not check_version(op_ver, need_ver) then
io.stderr:write("openresty version must >=", need_ver, " current ", op_ver, "\n")
return
end
end
_M.init = init
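-- check the etcd cluster version, then create the directory keys
-- ("/routes", "/upstreams", ...) that APISIX expects under the configured prefix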
local function init_etcd(show_output)
-- read_yaml_conf
local yaml_conf, err = read_yaml_conf()
if not yaml_conf then
error("failed to read local yaml config of apisix: " .. err)
end
if not yaml_conf.apisix then
error("failed to read `apisix` field from yaml file when init etcd")
end
if yaml_conf.apisix.config_center ~= "etcd" then
return true
end
if not yaml_conf.etcd then
error("failed to read `etcd` field from yaml file when init etcd")
end
local etcd_conf = yaml_conf.etcd
local timeout = etcd_conf.timeout or 3
local uri
--convert old single etcd config to multiple etcd config
if type(yaml_conf.etcd.host) == "string" then
yaml_conf.etcd.host = {yaml_conf.etcd.host}
end
local cluster_version
local host_count = #(yaml_conf.etcd.host)
local dkjson = require("dkjson")
-- check the etcd cluster version
for index, host in ipairs(yaml_conf.etcd.host) do
uri = host .. "/version"
local cmd = string.format("curl -s -m %d %s", timeout * 2, uri)
local res = execute_cmd(cmd)
local errmsg = string.format("got malformed version message: \"%s\" from etcd", res)
local body, _, err = dkjson.decode(res)
if err then
io.stderr:write(errmsg)
return
end
cluster_version = body["etcdcluster"]
if not cluster_version then
io.stderr:write(errmsg)
return
end
if compare_semantic_version(cluster_version, min_etcd_version) then
io.stderr:write("etcd cluster version ", cluster_version,
" is less than the required version ", min_etcd_version,
", please upgrade your etcd cluster")
return
end
break
end
local etcd_ok = false
for index, host in ipairs(yaml_conf.etcd.host) do
local is_success = true
for _, dir_name in ipairs({"/routes", "/upstreams", "/services",
"/plugins", "/consumers", "/node_status",
"/ssl", "/global_rules", "/stream_routes",
"/proto"}) do
local key = (etcd_conf.prefix or "") .. dir_name .. "/"
local base64_encode = require("base64").encode
local uri = host .. "/v3/kv/put"
local post_json = '{"value":"' .. base64_encode("init_dir") .. '", "key":"' .. base64_encode(key) .. '"}'
local cmd = "curl " .. uri .. " -X POST -d '" .. post_json
.. "' --connect-timeout " .. timeout
.. " --max-time " .. timeout * 2 .. " --retry 1 2>&1"
local res = execute_cmd(cmd)
-- a successful etcd v3 kv/put response carries the new revision in its header
if not res:find("revision", 1, true) then
is_success = false
if (index == host_count) then
error(cmd .. "\n" .. res)
end
break
end
if show_output then
print(cmd)
print(res)
end
end
if is_success then
etcd_ok = true
break
end
end
if not etcd_ok then
error("none of the configured etcd works well")
end
end
_M.init_etcd = init_etcd
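-- base openresty command line shared by start/stop/reload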
local openresty_args = [[openresty -p ]] .. apisix_home .. [[ -c ]]
.. apisix_home .. [[/conf/nginx.conf]]
function _M.start(...)
-- check running
local pid_path = apisix_home .. "/logs/nginx.pid"
local pid, err = read_file(pid_path)
if pid then
local hd = io.popen("lsof -p " .. pid)
local res = hd:read("*a")
if res and res ~= "" then
print("APISIX is running...")
return nil
end
end
init(...)
init_etcd(...)
local cmd = openresty_args
-- print(cmd)
os.execute(cmd)
end
function _M.stop()
local cmd = openresty_args .. [[ -s stop]]
-- print(cmd)
os.execute(cmd)
end
function _M.restart()
_M.stop()
_M.start()
end
function _M.reload()
-- reinit nginx.conf
init()
local test_cmd = openresty_args .. [[ -t -q ]]
-- On success:
-- on Linux, os.execute returns 0;
-- on macOS, os.execute returns 3 values: true, "exit", 0, and we need the first.
local test_ret = os.execute((test_cmd))
if (test_ret == 0 or test_ret == true) then
local cmd = openresty_args .. [[ -s reload]]
-- print(cmd)
os.execute(cmd)
return
end
print("test openresty failed")
end
function _M.version()
local ver = require("apisix.core.version")
print(ver['VERSION'])
end
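-- CLI entry point: arg[1] selects an action in _M, arg[2] is passed through to it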
local cmd_action = arg[1]
if not cmd_action then
return _M.help()
end
if not _M[cmd_action] then
print("invalid argument: ", cmd_action, "\n")
return
end
_M[cmd_action](arg[2])