From d25fbd324e291e999d24d30f4b16eae6e4785a4a Mon Sep 17 00:00:00 2001
From: mika
Date: Mon, 20 Jun 2022 11:09:04 +0800
Subject: [PATCH] feat: export some important params for kafka-client (#7266)

---
 apisix/plugins/kafka-logger.lua        | 9 +++++++++
 docs/en/latest/plugins/kafka-logger.md | 4 ++++
 docs/zh/latest/plugins/kafka-logger.md | 4 ++++
 3 files changed, 17 insertions(+)

diff --git a/apisix/plugins/kafka-logger.lua b/apisix/plugins/kafka-logger.lua
index 2947d145e4687..cb43ae3db24be 100644
--- a/apisix/plugins/kafka-logger.lua
+++ b/apisix/plugins/kafka-logger.lua
@@ -83,6 +83,11 @@ local schema = {
         -- in lua-resty-kafka, cluster_name is defined as number
         -- see https://github.com/doujiang24/lua-resty-kafka#new-1
         cluster_name = {type = "integer", minimum = 1, default = 1},
+        -- config for lua-resty-kafka, default values are the same as in lua-resty-kafka
+        producer_batch_num = {type = "integer", minimum = 1, default = 200},
+        producer_batch_size = {type = "integer", minimum = 0, default = 1048576},
+        producer_max_buffering = {type = "integer", minimum = 1, default = 50000},
+        producer_time_linger = {type = "integer", minimum = 1, default = 1}
     },
     required = {"broker_list", "kafka_topic"}
 }
@@ -208,6 +213,10 @@ function _M.log(conf, ctx)
     broker_config["request_timeout"] = conf.timeout * 1000
     broker_config["producer_type"] = conf.producer_type
     broker_config["required_acks"] = conf.required_acks
+    broker_config["batch_num"] = conf.producer_batch_num
+    broker_config["batch_size"] = conf.producer_batch_size
+    broker_config["max_buffering"] = conf.producer_max_buffering
+    broker_config["flush_time"] = conf.producer_time_linger * 1000

     local prod, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, create_producer,
                                                broker_list, broker_config, conf.cluster_name)
diff --git a/docs/en/latest/plugins/kafka-logger.md b/docs/en/latest/plugins/kafka-logger.md
index e32f64d128f38..c2f50b9bb34b2 100644
--- a/docs/en/latest/plugins/kafka-logger.md
+++ b/docs/en/latest/plugins/kafka-logger.md
@@ -47,6 +47,10 @@ For more info on Batch-Processor in Apache APISIX please refer.
 | include_resp_body| boolean | optional | false | [false, true] | Whether to include the response body. The response body is included if and only if it is `true`. |
 | include_resp_body_expr | array | optional | | | When `include_resp_body` is true, control the behavior based on the result of the [lua-resty-expr](https://github.com/api7/lua-resty-expr) expression. If present, only log the response body when the result is true. |
 | cluster_name | integer | optional | 1 | [0,...] | the name of the cluster. When there are two or more kafka clusters, you can specify different names. And this only works with async producer_type.|
+| producer_batch_num | integer | optional | 200 | [1,...] | `batch_num` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka); messages are merged and sent to the server in batches, unit is message count |
+| producer_batch_size | integer | optional | 1048576 | [0,...] | `batch_size` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka), unit is bytes |
+| producer_max_buffering | integer | optional | 50000 | [1,...] | `max_buffering` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka), maximum buffer size, unit is message count |
+| producer_time_linger | integer | optional | 1 | [1,...] | `flush_time` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka), unit is seconds |

 The plugin supports the use of batch processors to aggregate and process entries(logs/data) in a batch. This avoids frequent data submissions by the plugin, which by default the batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. For information or custom batch processor parameter settings, see [Batch-Processor](../batch-processor.md#configuration) configuration section.

diff --git a/docs/zh/latest/plugins/kafka-logger.md b/docs/zh/latest/plugins/kafka-logger.md
index 9257be4f0cbd1..302b273f37a84 100644
--- a/docs/zh/latest/plugins/kafka-logger.md
+++ b/docs/zh/latest/plugins/kafka-logger.md
@@ -47,6 +47,10 @@ title: kafka-logger
 | include_resp_body| boolean | 可选 | false | [false, true] | 是否包括响应体。包含响应体,当为`true`。 |
 | include_resp_body_expr | array | 可选 | | | 是否采集响体,基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 该选项需要开启 `include_resp_body`|
 | cluster_name | integer | 可选 | 1 | [0,...] | kafka 集群的名称。当有两个或多个 kafka 集群时,可以指定不同的名称。只适用于 producer_type 是 async 模式。|
+| producer_batch_num | integer | 可选 | 200 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`batch_num`参数,聚合消息批量提交,单位为消息条数 |
+| producer_batch_size | integer | 可选 | 1048576 | [0,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`batch_size`参数,单位为字节 |
+| producer_max_buffering | integer | 可选 | 50000 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`max_buffering`参数,最大缓冲区,单位为条 |
+| producer_time_linger | integer | 可选 | 1 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`flush_time`参数,单位为秒 |

 本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
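
For quick reference, the new `producer_*` options sit alongside the existing kafka-logger settings on a route. Below is a minimal sketch (not part of the patch itself) of enabling the plugin with these options through the Admin API; the route id, upstream node, broker address, topic name, and admin key are placeholders for illustration. Note that `producer_time_linger` is given in seconds and is multiplied by 1000 into lua-resty-kafka's millisecond-based `flush_time`, as shown in the hunk above.

```shell
# Hypothetical example: enable kafka-logger with the new producer_* options.
# Admin API address, admin key, broker, topic, route id and upstream are placeholders.
curl http://127.0.0.1:9080/apisix/admin/routes/1 \
  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
    "uri": "/hello",
    "plugins": {
        "kafka-logger": {
            "broker_list": { "127.0.0.1": 9092 },
            "kafka_topic": "test",
            "producer_batch_num": 200,
            "producer_batch_size": 1048576,
            "producer_max_buffering": 50000,
            "producer_time_linger": 1
        }
    },
    "upstream": {
        "type": "roundrobin",
        "nodes": { "127.0.0.1:1980": 1 }
    }
}'
```

Since the values above are the schema defaults, this configuration behaves the same as before the patch; raising `producer_batch_num`, `producer_batch_size`, or `producer_time_linger` trades a little delivery latency for higher throughput when log volume is high.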