From 5e115f4fc8eb1eb589775ba8f8ed8c09e0407567 Mon Sep 17 00:00:00 2001 From: ashing Date: Mon, 9 Jan 2023 18:40:00 +0800 Subject: [PATCH 01/11] feat: stream subsystem support kubernetes service discovery --- apisix/cli/ngx_tpl.lua | 7 ++ apisix/discovery/kubernetes/init.lua | 11 ++ t/APISIX.pm | 4 + t/kubernetes/discovery/stream/kubernetes.t | 121 +++++++++++++++++++++ 4 files changed, 143 insertions(+) create mode 100644 t/kubernetes/discovery/stream/kubernetes.t diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua index c1e46220a4e4..022b94ad9996 100644 --- a/apisix/cli/ngx_tpl.lua +++ b/apisix/cli/ngx_tpl.lua @@ -144,6 +144,13 @@ stream { lua_shared_dict plugin-limit-conn-stream {* stream.lua_shared_dict["plugin-limit-conn-stream"] *}; {% end %} + # for discovery shared dict + {% if discovery_shared_dicts then %} + {% for key, size in pairs(discovery_shared_dicts) do %} + lua_shared_dict {*key*}-stream {*size*}; + {% end %} + {% end %} + resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %}; resolver_timeout {*resolver_timeout*}; diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index d7258a55642b..b18683815f99 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -25,6 +25,7 @@ local os = os local error = error local pcall = pcall local setmetatable = setmetatable +local is_http = ngx.config.subsystem == "http" local process = require("ngx.process") local core = require("apisix.core") local util = require("apisix.cli.util") @@ -334,6 +335,10 @@ end local function single_mode_init(conf) local endpoint_dict = ngx.shared.kubernetes + if not is_http then + endpoint_dict = ngx.shared["kubernetes-stream"] + end + if not endpoint_dict then error("failed to get lua_shared_dict: ngx.shared.kubernetes, " .. "please check your APISIX version") @@ -408,6 +413,9 @@ local function multiple_mode_worker_init(confs) end local endpoint_dict = ngx.shared["kubernetes-" .. id] + if not is_http then + endpoint_dict = ngx.shared["kubernetes-" .. id .. "-stream"] + end if not endpoint_dict then error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. "please check your APISIX version") @@ -434,6 +442,9 @@ local function multiple_mode_init(confs) end local endpoint_dict = ngx.shared["kubernetes-" .. id] + if not is_http then + endpoint_dict = ngx.shared["kubernetes-" .. id .. "-stream"] + end if not endpoint_dict then error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. "please check your APISIX version") diff --git a/t/APISIX.pm b/t/APISIX.pm index 22e143ba2244..34ca73bddfde 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -392,6 +392,10 @@ _EOC_ lua_shared_dict plugin-limit-conn-stream 10m; lua_shared_dict etcd-cluster-health-check-stream 10m; + lua_shared_dict kubernetes-stream 1m; + lua_shared_dict kubernetes-first-stream 1m; + lua_shared_dict kubernetes-second-stream 1m; + upstream apisix_backend { server 127.0.0.1:1900; balancer_by_lua_block { diff --git a/t/kubernetes/discovery/stream/kubernetes.t b/t/kubernetes/discovery/stream/kubernetes.t new file mode 100644 index 000000000000..1f66a1dbb24f --- /dev/null +++ b/t/kubernetes/discovery/stream/kubernetes.t @@ -0,0 +1,121 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; + our $token_value = eval {`cat $token_file 2>/dev/null`}; + + our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + - id: second + service: + schema: "http", + host: "127.0.0.1", + port: "6445" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + +_EOC_ + +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + if (!$block->apisix_yaml) { + $block->set_value("apisix_yaml", $apisix_yaml); + } + + my $main_config = $block->main_config // <<_EOC_; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: connect to first/default/kubernetes:https endpoints +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - + id: 1 + server_port: 1985 + upstream: + service_name: first/default/kubernetes:https + discovery_type: kubernetes + type: roundrobin + +#END +--- stream_request +"GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n" +--- log_level: info +--- error_log eval +qr/proxy request to \S+:6443/ + + + +=== TEST 2: connect to first/ns-d/ep:p1 endpoints, no valid upstreams node +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - + id: 1 + server_port: 1985 + upstream: + service_name: first/ns-d/ep:p1 + discovery_type: kubernetes + type: roundrobin + +#END +--- stream_request +"GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n" +--- error_log +no valid upstream node From 8c0f3450af68c45744bbd1467df9356383735bcd Mon Sep 17 00:00:00 2001 From: ashing Date: Mon, 9 Jan 2023 21:24:34 +0800 Subject: [PATCH 02/11] feat: test cases --- t/kubernetes/discovery/stream/kubernetes.t | 275 ++++++++++++++++++--- 1 file changed, 247 insertions(+), 28 deletions(-) diff --git a/t/kubernetes/discovery/stream/kubernetes.t b/t/kubernetes/discovery/stream/kubernetes.t index 1f66a1dbb24f..50da9afaf851 100644 --- a/t/kubernetes/discovery/stream/kubernetes.t +++ b/t/kubernetes/discovery/stream/kubernetes.t @@ -62,9 +62,7 @@ routes: [] #END _EOC_ - if (!$block->apisix_yaml) { - $block->set_value("apisix_yaml", $apisix_yaml); - } + $block->set_value("apisix_yaml", $apisix_yaml); my 
$main_config = $block->main_config // <<_EOC_; env KUBERNETES_SERVICE_HOST=127.0.0.1; @@ -75,47 +73,268 @@ _EOC_ $block->set_value("main_config", $main_config); + my $config = $block->config // <<_EOC_; + location /operators { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local operators = core.json.decode(request_body) + + core.log.info("get body ", request_body) + core.log.info("get operators ", #operators) + for _, op in ipairs(operators) do + local method, path, body + local headers = { + ["Host"] = "127.0.0.1:6445" + } + + if op.op == "replace_subsets" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name + if #op.subsets == 0 then + body = '[{"path":"/subsets","op":"replace","value":[]}]' + else + local t = { { op = "replace", path = "/subsets", value = op.subsets } } + body = core.json.encode(t, true) + end + headers["Content-Type"] = "application/json-patch+json" + end + + if op.op == "replace_labels" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name + local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } + body = core.json.encode(t, true) + headers["Content-Type"] = "application/json-patch+json" + end + + local httpc = http.new() + core.log.info("begin to connect ", "127.0.0.1:6445") + local ok, message = httpc:connect({ + scheme = "http", + host = "127.0.0.1", + port = 6445, + }) + if not ok then + core.log.error("connect 127.0.0.1:6445 failed, message : ", message) + ngx.say("FAILED") + end + local res, err = httpc:request({ + method = method, + path = path, + headers = headers, + body = body, + }) + if err ~= nil then + core.log.err("operator k8s cluster error: ", err) + return 500 + end + if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then + return res.status + end + end + ngx.say("DONE") + } + } + +_EOC_ + + $block->set_value("config", $config); + + my $stream_config = $block->stream_config // <<_EOC_; + server { + listen 8125; + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.kubernetes") + + ngx.sleep(1) + + local sock = ngx.req.socket() + local request_body = sock:receive() + + core.log.info("get body ", request_body) + + local response_body = "{" + local queries = core.json.decode(request_body) + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." "..#nodes + end + end + ngx.say(response_body.." 
}") + } + } + +_EOC_ + + $block->set_value("extra_stream_config", $stream_config); + }); run_tests(); __DATA__ -=== TEST 1: connect to first/default/kubernetes:https endpoints +=== TEST 1: create namespace and endpoints --- yaml_config eval: $::yaml_config ---- apisix_yaml -stream_routes: - - - id: 1 - server_port: 1985 - upstream: - service_name: first/default/kubernetes:https - discovery_type: kubernetes - type: roundrobin - -#END ---- stream_request -"GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n" ---- log_level: info ---- error_log eval -qr/proxy request to \S+:6443/ +--- request +POST /operators +[ + { + "op": "replace_subsets", + "namespace": "ns-a", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-b" + }, + { + "op": "replace_subsets", + "namespace": "ns-b", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-c" + }, + { + "op": "replace_subsets", + "namespace": "ns-c", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "port": 5002 + } + ] + } + ] + } +] +--- more_headers +Content-type: application/json -=== TEST 2: connect to first/ns-d/ep:p1 endpoints, no valid upstreams node +=== TEST 2: use default parameters --- yaml_config eval: $::yaml_config +--- extra_stream_config eval: $::yaml_config --- apisix_yaml stream_routes: - id: 1 server_port: 1985 - upstream: - service_name: first/ns-d/ep:p1 - discovery_type: kubernetes - type: roundrobin + upstream_id: 1 + +upstreams: + - nodes: + "127.0.0.1:8125": 1 + type: roundrobin + id: 1 #END --- stream_request -"GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n" ---- error_log -no valid upstream node +["first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1"] +--- stream_response eval +qr{ 2 2 2 2 2 2 } From 66079cd4ed69476a4c78003aa4a145c7c2b6b62a Mon Sep 17 00:00:00 2001 From: ashing Date: Mon, 9 Jan 2023 23:07:26 +0800 Subject: [PATCH 03/11] fix: ci --- ci/linux_openresty_1_19_runner.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/linux_openresty_1_19_runner.sh b/ci/linux_openresty_1_19_runner.sh index ed1751308926..45ef5cfa927f 100755 --- a/ci/linux_openresty_1_19_runner.sh +++ b/ci/linux_openresty_1_19_runner.sh @@ -17,5 +17,5 @@ # -export OPENRESTY_VERSION=1.19.3.2 +export OPENRESTY_VERSION=1.19.9.1 . 
./ci/linux_openresty_common_runner.sh From 34f37b829e356288a71b7c7ffbe3e1e571eb8fdd Mon Sep 17 00:00:00 2001 From: ashing Date: Tue, 10 Jan 2023 11:19:08 +0800 Subject: [PATCH 04/11] chore: remove code line --- t/kubernetes/discovery/stream/kubernetes.t | 1 - 1 file changed, 1 deletion(-) diff --git a/t/kubernetes/discovery/stream/kubernetes.t b/t/kubernetes/discovery/stream/kubernetes.t index 50da9afaf851..8aa4ee0fe5c3 100644 --- a/t/kubernetes/discovery/stream/kubernetes.t +++ b/t/kubernetes/discovery/stream/kubernetes.t @@ -319,7 +319,6 @@ Content-type: application/json === TEST 2: use default parameters --- yaml_config eval: $::yaml_config ---- extra_stream_config eval: $::yaml_config --- apisix_yaml stream_routes: - From cafcbfd6f76182b148f87f23fc9da3b8cf4b7d0d Mon Sep 17 00:00:00 2001 From: ashing Date: Tue, 10 Jan 2023 18:34:41 +0800 Subject: [PATCH 05/11] fix: test case --- apisix/discovery/kubernetes/init.lua | 23 +++++++++++----------- ci/linux_openresty_1_19_runner.sh | 2 +- t/kubernetes/discovery/stream/kubernetes.t | 11 ++++++++++- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index b18683815f99..234954714079 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -332,13 +332,18 @@ local function start_fetch(handle) ngx.timer.at(0, timer_runner) end - -local function single_mode_init(conf) - local endpoint_dict = ngx.shared.kubernetes +local function get_endpoint_dict(shm) if not is_http then - endpoint_dict = ngx.shared["kubernetes-stream"] + shm = shm .. "-stream" end + return ngx.shared[shm] +end + + +local function single_mode_init(conf) + local endpoint_dict = get_endpoint_dict("kubernetes") + if not endpoint_dict then error("failed to get lua_shared_dict: ngx.shared.kubernetes, " .. "please check your APISIX version") @@ -412,10 +417,7 @@ local function multiple_mode_worker_init(confs) error("duplicate id value") end - local endpoint_dict = ngx.shared["kubernetes-" .. id] - if not is_http then - endpoint_dict = ngx.shared["kubernetes-" .. id .. "-stream"] - end + local endpoint_dict = get_endpoint_dict("kubernetes-" .. id) if not endpoint_dict then error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. "please check your APISIX version") @@ -441,10 +443,7 @@ local function multiple_mode_init(confs) error("duplicate id value") end - local endpoint_dict = ngx.shared["kubernetes-" .. id] - if not is_http then - endpoint_dict = ngx.shared["kubernetes-" .. id .. "-stream"] - end + local endpoint_dict = get_endpoint_dict("kubernetes-" .. id) if not endpoint_dict then error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. "please check your APISIX version") diff --git a/ci/linux_openresty_1_19_runner.sh b/ci/linux_openresty_1_19_runner.sh index 45ef5cfa927f..ed1751308926 100755 --- a/ci/linux_openresty_1_19_runner.sh +++ b/ci/linux_openresty_1_19_runner.sh @@ -17,5 +17,5 @@ # -export OPENRESTY_VERSION=1.19.9.1 +export OPENRESTY_VERSION=1.19.3.2 . 
./ci/linux_openresty_common_runner.sh diff --git a/t/kubernetes/discovery/stream/kubernetes.t b/t/kubernetes/discovery/stream/kubernetes.t index 8aa4ee0fe5c3..3df431fb64a8 100644 --- a/t/kubernetes/discovery/stream/kubernetes.t +++ b/t/kubernetes/discovery/stream/kubernetes.t @@ -46,7 +46,16 @@ _EOC_ } -use t::APISIX 'no_plan'; +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version =~ m/\/1.19.3/) { + plan(skip_all => "require OpenResty version >= 1.19.9.1"); +} else { + plan('no_plan'); +} repeat_each(1); log_level('warn'); From 54473dc5e61e2a93b3bf43eb5e55c310aef6b6ea Mon Sep 17 00:00:00 2001 From: ashing Date: Thu, 12 Jan 2023 14:15:23 +0800 Subject: [PATCH 06/11] fix: review --- apisix/discovery/kubernetes/init.lua | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index 234954714079..d16d4f4fcd31 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -332,7 +332,13 @@ local function start_fetch(handle) ngx.timer.at(0, timer_runner) end -local function get_endpoint_dict(shm) +local function get_endpoint_dict(id) + local shm = "kubernetes" + + if id and #id > 0 then + shm = shm .. "-" .. id + end + if not is_http then shm = shm .. "-stream" end @@ -342,7 +348,7 @@ end local function single_mode_init(conf) - local endpoint_dict = get_endpoint_dict("kubernetes") + local endpoint_dict = get_endpoint_dict() if not endpoint_dict then error("failed to get lua_shared_dict: ngx.shared.kubernetes, " .. @@ -417,7 +423,7 @@ local function multiple_mode_worker_init(confs) error("duplicate id value") end - local endpoint_dict = get_endpoint_dict("kubernetes-" .. id) + local endpoint_dict = get_endpoint_dict(id) if not endpoint_dict then error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. "please check your APISIX version") @@ -443,7 +449,7 @@ local function multiple_mode_init(confs) error("duplicate id value") end - local endpoint_dict = get_endpoint_dict("kubernetes-" .. id) + local endpoint_dict = get_endpoint_dict(id) if not endpoint_dict then error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. "please check your APISIX version") From 3501d50d57992863d0247266599b5285f0f673a8 Mon Sep 17 00:00:00 2001 From: ashing Date: Fri, 13 Jan 2023 16:24:08 +0800 Subject: [PATCH 07/11] fix: print error log if not support ngx.process --- apisix/discovery/kubernetes/init.lua | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index d16d4f4fcd31..79b13ad94450 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -26,7 +26,7 @@ local error = error local pcall = pcall local setmetatable = setmetatable local is_http = ngx.config.subsystem == "http" -local process = require("ngx.process") +local support_process, process = pcall(require, "ngx.process") local core = require("apisix.core") local util = require("apisix.cli.util") local local_conf = require("apisix.core.config_local").local_conf() @@ -520,6 +520,12 @@ end function _M.init_worker() + if not support_process then + core.log.error("kubernetes discovery not support in subsystem: " + .. ngx.config.subsystem + .. 
", please check if your openresty version >= 1.19.9.1 or not") + return + end local discovery_conf = local_conf.discovery.kubernetes core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf)) if #discovery_conf == 0 then From ffb80e8321cbfb8848420d9e038907a6080b739f Mon Sep 17 00:00:00 2001 From: ashing Date: Fri, 13 Jan 2023 16:45:57 +0800 Subject: [PATCH 08/11] docs: add note --- docs/en/latest/discovery/kubernetes.md | 7 +++++++ docs/zh/latest/discovery/kubernetes.md | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/docs/en/latest/discovery/kubernetes.md b/docs/en/latest/discovery/kubernetes.md index 04e01f6ca2b8..32cefb5b588e 100644 --- a/docs/en/latest/discovery/kubernetes.md +++ b/docs/en/latest/discovery/kubernetes.md @@ -34,6 +34,13 @@ The [_Kubernetes_](https://kubernetes.io/) service discovery [_List-Watch_](http Discovery also provides a node query interface in accordance with the [_APISIX Discovery Specification_](https://github.com/apache/apisix/blob/master/docs/en/latest/discovery.md). + +:::note + +use kubernetes discovery in L4 require OpenResty version >= 1.19.9.1 + +::: + ## How To Use Kubernetes service discovery both support single-cluster and multi-cluster mode, applicable to the case where the service is distributed in a single or multiple Kubernetes clusters. diff --git a/docs/zh/latest/discovery/kubernetes.md b/docs/zh/latest/discovery/kubernetes.md index 17342882082a..9706b425af8e 100644 --- a/docs/zh/latest/discovery/kubernetes.md +++ b/docs/zh/latest/discovery/kubernetes.md @@ -34,6 +34,12 @@ Kubernetes 服务发现以 [_List-Watch_](https://kubernetes.io/docs/reference/u 同时遵循 [_APISIX Discovery 规范_](https://github.com/apache/apisix/blob/master/docs/zh/latest/discovery.md) 提供了节点查询接口。 +:::note + +在四层中使用 Kubenetes 服务发现要求 OpenResty 版本大于等于 1.19.9.1 + +::: + ## Kubernetes 服务发现的使用 目前 Kubernetes 服务发现支持单集群和多集群模式,分别适用于待发现的服务分布在单个或多个 Kubernetes 的场景。 From 2c74653e9af0a5a8cbad53d01faf12bc71fa9207 Mon Sep 17 00:00:00 2001 From: ashing Date: Sun, 15 Jan 2023 22:27:18 +0800 Subject: [PATCH 09/11] fix: ci --- docs/en/latest/discovery/kubernetes.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/en/latest/discovery/kubernetes.md b/docs/en/latest/discovery/kubernetes.md index 32cefb5b588e..e80c7385151a 100644 --- a/docs/en/latest/discovery/kubernetes.md +++ b/docs/en/latest/discovery/kubernetes.md @@ -34,7 +34,6 @@ The [_Kubernetes_](https://kubernetes.io/) service discovery [_List-Watch_](http Discovery also provides a node query interface in accordance with the [_APISIX Discovery Specification_](https://github.com/apache/apisix/blob/master/docs/en/latest/discovery.md). 
- :::note use kubernetes discovery in L4 require OpenResty version >= 1.19.9.1 From 336149ee302502b78699965b2c61e73d524dcc5e Mon Sep 17 00:00:00 2001 From: ashing Date: Sun, 15 Jan 2023 22:40:02 +0800 Subject: [PATCH 10/11] fix: ci --- docs/zh/latest/discovery/kubernetes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/latest/discovery/kubernetes.md b/docs/zh/latest/discovery/kubernetes.md index 9706b425af8e..5e6d3e0403bf 100644 --- a/docs/zh/latest/discovery/kubernetes.md +++ b/docs/zh/latest/discovery/kubernetes.md @@ -36,7 +36,7 @@ Kubernetes 服务发现以 [_List-Watch_](https://kubernetes.io/docs/reference/u :::note -在四层中使用 Kubenetes 服务发现要求 OpenResty 版本大于等于 1.19.9.1 +在四层中使用 Kubernetes 服务发现要求 OpenResty 版本大于等于 1.19.9.1 ::: From 4d875a3b6246b88e6ed35670ded0799385355a78 Mon Sep 17 00:00:00 2001 From: ashing Date: Tue, 17 Jan 2023 13:58:56 +0800 Subject: [PATCH 11/11] fix: review --- apisix/discovery/kubernetes/init.lua | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index 79b13ad94450..3f5f275d9aff 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -521,9 +521,8 @@ end function _M.init_worker() if not support_process then - core.log.error("kubernetes discovery not support in subsystem: " - .. ngx.config.subsystem - .. ", please check if your openresty version >= 1.19.9.1 or not") + core.log.error("kubernetes discovery not support in subsystem: ", ngx.config.subsystem, + ", please check if your openresty version >= 1.19.9.1 or not") return end local discovery_conf = local_conf.discovery.kubernetes
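
For reference, a minimal usage sketch assembled from the test fixtures in this series: the discovery block below mirrors the tests' yaml_config (APISIX config.yaml), and the stream route mirrors TEST 1 of PATCH 01 (standalone apisix.yaml route data). The id, host, port, server_port and token_file values are taken from the test fixtures and are examples only, not recommendations for a real cluster.

discovery:
  kubernetes:
    - id: first
      service:
        host: "127.0.0.1"
        port: "6443"
      client:
        token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"

stream_routes:
  - id: 1
    server_port: 1985
    upstream:
      service_name: first/default/kubernetes:https
      discovery_type: kubernetes
      type: roundrobin

With this configuration, a TCP connection to port 1985 is proxied to the endpoints of the default/kubernetes service resolved by the "first" discovery client, which is the behavior exercised by the stream test cases above.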