diff --git a/apisix/plugins/ai-rate-limiting.lua b/apisix/plugins/ai-rate-limiting.lua index d8bf970faa4f..a46456b4e792 100644 --- a/apisix/plugins/ai-rate-limiting.lua +++ b/apisix/plugins/ai-rate-limiting.lua @@ -20,6 +20,7 @@ local ipairs = ipairs local type = type local core = require("apisix.core") local limit_count = require("apisix.plugins.limit-count.init") +local policy_to_additional_properties = require("apisix.utils.redis-schema").schema local plugin_name = "ai-rate-limiting" @@ -56,6 +57,12 @@ local schema = { rejected_msg = { type = "string", minLength = 1 }, + policy = { + type = "string", + enum = {"local", "redis", "redis-cluster"}, + default = "local", + }, + allow_degradation = {type = "boolean", default = false}, }, dependencies = { limit = {"time_window"}, @@ -68,6 +75,24 @@ local schema = { { required = {"instances"} } + }, + ["if"] = { + properties = { + policy = { + enum = {"redis"}, + }, + }, + }, + ["then"] = policy_to_additional_properties.redis, + ["else"] = { + ["if"] = { + properties = { + policy = { + enum = {"redis-cluster"}, + }, + }, + }, + ["then"] = policy_to_additional_properties["redis-cluster"], } } @@ -99,7 +124,8 @@ local function transform_limit_conf(plugin_conf, instance_conf, instance_name) limit = instance_conf.limit time_window = instance_conf.time_window end - return { + + local limit_conf = { _vid = key, key = key, @@ -109,15 +135,36 @@ local function transform_limit_conf(plugin_conf, instance_conf, instance_name) rejected_msg = plugin_conf.rejected_msg, show_limit_quota_header = plugin_conf.show_limit_quota_header, -- limit-count need these fields - policy = "local", + policy = plugin_conf.policy or "local", key_type = "constant", - allow_degradation = false, + allow_degradation = plugin_conf.allow_degradation or false, sync_interval = -1, limit_header = "X-AI-RateLimit-Limit-" .. name, remaining_header = "X-AI-RateLimit-Remaining-" .. name, reset_header = "X-AI-RateLimit-Reset-" .. name, } + + -- Pass through Redis configuration if policy is redis or redis-cluster + if plugin_conf.policy == "redis" then + limit_conf.redis_host = plugin_conf.redis_host + limit_conf.redis_port = plugin_conf.redis_port + limit_conf.redis_username = plugin_conf.redis_username + limit_conf.redis_password = plugin_conf.redis_password + limit_conf.redis_database = plugin_conf.redis_database + limit_conf.redis_timeout = plugin_conf.redis_timeout + limit_conf.redis_ssl = plugin_conf.redis_ssl + limit_conf.redis_ssl_verify = plugin_conf.redis_ssl_verify + elseif plugin_conf.policy == "redis-cluster" then + limit_conf.redis_cluster_nodes = plugin_conf.redis_cluster_nodes + limit_conf.redis_cluster_name = plugin_conf.redis_cluster_name + limit_conf.redis_password = plugin_conf.redis_password + limit_conf.redis_timeout = plugin_conf.redis_timeout + limit_conf.redis_cluster_ssl = plugin_conf.redis_cluster_ssl + limit_conf.redis_cluster_ssl_verify = plugin_conf.redis_cluster_ssl_verify + end + + return limit_conf end diff --git a/apisix/plugins/limit-count/limit-count-redis-cluster.lua b/apisix/plugins/limit-count/limit-count-redis-cluster.lua index be7029b667ce..8ed7873ddff9 100644 --- a/apisix/plugins/limit-count/limit-count-redis-cluster.lua +++ b/apisix/plugins/limit-count/limit-count-redis-cluster.lua @@ -14,11 +14,13 @@ -- See the License for the specific language governing permissions and -- limitations under the License. 
 --
-
 local redis_cluster = require("apisix.utils.rediscluster")
 local core = require("apisix.core")
+local ngx = ngx
+local get_phase = ngx.get_phase
 local setmetatable = setmetatable
-local tostring = tostring
+local util = require("apisix.plugins.limit-count.util")
+local ngx_timer_at = ngx.timer.at
 
 
 local _M = {}
@@ -28,17 +30,6 @@ local mt = {
 }
 
 
-local script = core.string.compress_script([=[
-    assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1")
-    local ttl = redis.call('ttl', KEYS[1])
-    if ttl < 0 then
-        redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
-        return {ARGV[1] - ARGV[3], ARGV[2]}
-    end
-    return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
-]=])
-
-
 function _M.new(plugin_name, limit, window, conf)
     local red_cli, err = redis_cluster.new(conf, "plugin-limit-count-redis-cluster-slot-lock")
     if not red_cli then
@@ -57,26 +48,47 @@ function _M.new(plugin_name, limit, window, conf)
 end
 
 
-function _M.incoming(self, key, cost)
-    local red = self.red_cli
-    local limit = self.limit
-    local window = self.window
-    key = self.plugin_name .. tostring(key)
+local function log_phase_incoming_thread(premature, self, key, cost)
+    if premature then
+        return
+    end
+
+    return util.redis_log_phase_incoming(self, self.red_cli, key, cost)
+end
+
 
-    local ttl = 0
-    local res, err = red:eval(script, 1, key, limit, window, cost or 1)
+local function log_phase_incoming(self, key, cost, dry_run)
+    if dry_run then
+        return true
+    end
 
-    if err then
-        return nil, err, ttl
+    local ok, err = ngx_timer_at(0, log_phase_incoming_thread, self, key, cost)
+    if not ok then
+        core.log.error("failed to create timer: ", err)
+        return nil, err
     end
 
-    local remaining = res[1]
-    ttl = res[2]
+    return ok
+end
+
+
+function _M.incoming(self, key, cost, dry_run)
+    if get_phase() == "log" then
+        local ok, err = log_phase_incoming(self, key, cost, dry_run)
+        if not ok then
+            return nil, err, 0
+        end
 
-    if remaining < 0 then
-        return nil, "rejected", ttl
+        -- best-effort result because the Redis call is deferred to a timer
+        return 0, self.limit, self.window
     end
-    return 0, remaining, ttl
+
+    local commit = true
+    if dry_run ~= nil then
+        commit = not dry_run
+    end
+
+    return util.redis_incoming(self, self.red_cli, key, commit, cost)
 end
diff --git a/apisix/plugins/limit-count/limit-count-redis.lua b/apisix/plugins/limit-count/limit-count-redis.lua
index c40ed437f342..8a0c2849e009 100644
--- a/apisix/plugins/limit-count/limit-count-redis.lua
+++ b/apisix/plugins/limit-count/limit-count-redis.lua
@@ -16,9 +16,12 @@
 --
 local redis = require("apisix.utils.redis")
 local core = require("apisix.core")
+local ngx = ngx
+local get_phase = ngx.get_phase
 local assert = assert
 local setmetatable = setmetatable
-local tostring = tostring
+local util = require("apisix.plugins.limit-count.util")
+local ngx_timer_at = ngx.timer.at
 
 
 local _M = {version = 0.3}
@@ -29,17 +32,6 @@ local mt = {
 }
 
 
-local script = core.string.compress_script([=[
-    assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1")
-    local ttl = redis.call('ttl', KEYS[1])
-    if ttl < 0 then
-        redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
-        return {ARGV[1] - ARGV[3], ARGV[2]}
-    end
-    return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
-]=])
-
-
 function _M.new(plugin_name, limit, window, conf)
     assert(limit > 0 and window > 0)
 
@@ -52,37 +44,83 @@ function _M.new(plugin_name, limit, window, conf)
     return setmetatable(self, mt)
 end
 
-function _M.incoming(self, key, cost)
+
+local function log_phase_incoming_thread(premature, self, key, cost)
+    if premature then
+        return
+    end
+
     local conf = self.conf
     local red, err = redis.new(conf)
     if not red then
-        return red, err, 0
+        return red, err
     end
 
+    local res, eval_err = util.redis_log_phase_incoming(self, red, key, cost)
+
+    -- the timer context has no later phase to release the connection,
+    -- so return it to the pool explicitly
+    local ok, keep_err = red:set_keepalive(10000, 100)
+    if not ok then
+        core.log.error("failed to set keepalive: ", keep_err)
+    end
+
+    return res, eval_err
+end
 
-    local limit = self.limit
-    local window = self.window
-    local res
-    key = self.plugin_name .. tostring(key)
 
-    local ttl = 0
-    res, err = red:eval(script, 1, key, limit, window, cost or 1)
+local function log_phase_incoming(self, key, cost, dry_run)
+    if dry_run then
+        return true
+    end
 
-    if err then
-        return nil, err, ttl
+    local ok, err = ngx_timer_at(0, log_phase_incoming_thread, self, key, cost)
+    if not ok then
+        core.log.error("failed to create timer: ", err)
+        return nil, err
     end
 
-    local remaining = res[1]
-    ttl = res[2]
+    return ok
+end
+
+
+function _M.incoming(self, key, cost, dry_run)
+    if get_phase() == "log" then
+        local ok, err = log_phase_incoming(self, key, cost, dry_run)
+        if not ok then
+            return nil, err, 0
+        end
+
+        -- best-effort result because lua-resty-redis is not allowed in log phase
+        return 0, self.limit, self.window
+    end
+
+    local conf = self.conf
+    local red, err = redis.new(conf)
+    if not red then
+        return red, err, 0
+    end
+
+    local commit = true
+    if dry_run ~= nil then
+        commit = not dry_run
+    end
+
+    local delay, remaining, ttl = util.redis_incoming(self, red, key, commit, cost)
+    if not delay then
+        local err = remaining
+        if err == "rejected" then
+            -- the connection is still usable after a rejection,
+            -- so return it to the pool
+            red:set_keepalive(10000, 100)
+        end
+        return nil, err, ttl or 0
+    end
 
     local ok, err = red:set_keepalive(10000, 100)
     if not ok then
         return nil, err, ttl
     end
 
-    if remaining < 0 then
-        return nil, "rejected", ttl
-    end
-    return 0, remaining, ttl
+    return delay, remaining, ttl
 end
diff --git a/apisix/plugins/limit-count/util.lua b/apisix/plugins/limit-count/util.lua
new file mode 100644
index 000000000000..f8c84e310f94
--- /dev/null
+++ b/apisix/plugins/limit-count/util.lua
@@ -0,0 +1,79 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local tostring = tostring
+local tonumber = tonumber
+local _M = {version = 0.1}
+
+local commit_script = core.string.compress_script([=[
+    assert(tonumber(ARGV[3]) >= 0, "cost must be at least 0")
+    local ttl = redis.call('ttl', KEYS[1])
+    if ttl < 0 then
+        redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
+        return {ARGV[1] - ARGV[3], ARGV[2]}
+    end
+    return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
+]=])
+
+function _M.redis_incoming(self, red, key, commit, cost)
+    local limit = self.limit
+    local window = self.window
+    key = self.plugin_name ..
tostring(key) + + local requested_cost = cost or 1 + local script_cost = commit and requested_cost or 0 + local res, err = red:eval(commit_script, 1, key, limit, window, script_cost) + + if err then + return nil, err, 0 + end + + local stored_remaining = tonumber(res[1]) + if stored_remaining == nil then + stored_remaining = limit - script_cost + end + local ttl = tonumber(res[2]) or window + + local remaining + if commit then + remaining = stored_remaining + else + remaining = stored_remaining - requested_cost + end + + if remaining < 0 then + return nil, "rejected", ttl + end + + return 0, remaining, ttl +end + +function _M.redis_log_phase_incoming(self, red, key, cost) + local limit = self.limit + local window = self.window + key = self.plugin_name .. tostring(key) + + local res, err = red:eval(commit_script, 1, key, limit, window, cost or 1) + if err then + return nil, err + end + + return res[1] +end + +return _M + diff --git a/docs/en/latest/plugins/ai-rate-limiting.md b/docs/en/latest/plugins/ai-rate-limiting.md index 9cc21a914db4..5c9e9cf605df 100644 --- a/docs/en/latest/plugins/ai-rate-limiting.md +++ b/docs/en/latest/plugins/ai-rate-limiting.md @@ -35,22 +35,36 @@ description: The ai-rate-limiting Plugin enforces token-based rate limiting for ## Description -The `ai-rate-limiting` Plugin enforces token-based rate limiting for requests sent to LLM services. It helps manage API usage by controlling the number of tokens consumed within a specified time frame, ensuring fair resource allocation and preventing excessive load on the service. It is often used with [`ai-proxy`](./ai-proxy.md) or [`ai-proxy-multi`](./ai-proxy-multi.md) plugin. +The `ai-rate-limiting` Plugin enforces token-based rate limiting for requests sent to LLM services. It helps manage API usage by controlling the number of tokens consumed within a specified time frame, ensuring fair resource allocation and preventing excessive load on the service. Token counters can be stored locally on each APISIX node or persisted to Redis/Redis Cluster to coordinate quotas across replicas. It is often used with [`ai-proxy`](./ai-proxy.md) or [`ai-proxy-multi`](./ai-proxy-multi.md) plugin. ## Attributes -| Name | Type | Required | Default | Valid values | Description | -|------------------------------|----------------|----------|----------|---------------------------------------------------------|-------------| -| limit | integer | False | | >0 | The maximum number of tokens allowed within a given time interval. At least one of `limit` and `instances.limit` should be configured. | -| time_window | integer | False | | >0 | The time interval corresponding to the rate limiting `limit` in seconds. At least one of `time_window` and `instances.time_window` should be configured. | -| show_limit_quota_header | boolean | False | true | | If true, includes `X-AI-RateLimit-Limit-*`, `X-AI-RateLimit-Remaining-*`, and `X-AI-RateLimit-Reset-*` headers in the response, where `*` is the instance name. | +| Name | Type | Required | Default | Valid values | Description | +|------------------------------|----------------|----------|--------------|---------------------------------------------------------|-------------| +| limit | integer | False | | >0 | The maximum number of tokens allowed within a given time interval. At least one of `limit` and `instances.limit` should be configured. | +| time_window | integer | False | | >0 | The time interval corresponding to the rate limiting `limit` in seconds. 
At least one of `time_window` and `instances.time_window` should be configured. | +| show_limit_quota_header | boolean | False | true | | If true, includes `X-AI-RateLimit-Limit-*`, `X-AI-RateLimit-Remaining-*`, and `X-AI-RateLimit-Reset-*` headers in the response, where `*` is the instance name. | | limit_strategy | string | False | total_tokens | [total_tokens, prompt_tokens, completion_tokens] | Type of token to apply rate limiting. `total_tokens` is the sum of `prompt_tokens` and `completion_tokens`. | -| instances | array[object] | False | | | LLM instance rate limiting configurations. | -| instances.name | string | True | | | Name of the LLM service instance. | -| instances.limit | integer | True | | >0 | The maximum number of tokens allowed within a given time interval for an instance. | -| instances.time_window | integer | True | | >0 | The time interval corresponding to the rate limiting `limit` in seconds for an instance. | -| rejected_code | integer | False | 503 | [200, 599] | The HTTP status code returned when a request exceeding the quota is rejected. | -| rejected_msg | string | False | | | The response body returned when a request exceeding the quota is rejected. | +| instances | array[object] | False | | | LLM instance rate limiting configurations. | +| instances.name | string | True | | | Name of the LLM service instance. | +| instances.limit | integer | True | | >0 | The maximum number of tokens allowed within a given time interval for an instance. | +| instances.time_window | integer | True | | >0 | The time interval corresponding to the rate limiting `limit` in seconds for an instance. | +| rejected_code | integer | False | 503 | [200, 599] | The HTTP status code returned when a request exceeding the quota is rejected. | +| rejected_msg | string | False | | | The response body returned when a request exceeding the quota is rejected. | +| policy | string | False | local | [local, redis, redis-cluster] | Storage policy for the rate limiting counter. Use `redis` or `redis-cluster` to share quotas across APISIX nodes or persist counters across restarts. | +| allow_degradation | boolean | False | false | | If true, allows APISIX to continue proxying traffic when the Redis backend is unavailable. | +| redis_host | string | False | | | Address of the Redis node. Required when `policy` is `redis`. | +| redis_port | integer | False | 6379 | >=1 | Port of the Redis node when `policy` is `redis`. | +| redis_username | string | False | | | Username for Redis ACL authentication when `policy` is `redis`. Leave empty when using `requirepass`. | +| redis_password | string | False | | | Password for the Redis node when `policy` is `redis` or `redis-cluster`. | +| redis_database | integer | False | 0 | >=0 | Database index for Redis when `policy` is `redis`. | +| redis_timeout | integer | False | 1000 | >=1 | Redis operation timeout in milliseconds when `policy` is `redis` or `redis-cluster`. | +| redis_ssl | boolean | False | false | | If true, uses TLS when connecting to Redis for the `redis` policy. | +| redis_ssl_verify | boolean | False | false | | If true, verifies the Redis server certificate when `policy` is `redis` and TLS is enabled. | +| redis_cluster_nodes | array[string] | False | | | List of Redis Cluster node addresses (for example, `["10.0.0.1:6379","10.0.0.2:6379"]`). Required when `policy` is `redis-cluster`. | +| redis_cluster_name | string | False | | | Cluster name used by Redis Cluster clients. Required when `policy` is `redis-cluster`. 
| +| redis_cluster_ssl | boolean | False | false | | If true, uses TLS when connecting to Redis Cluster. | +| redis_cluster_ssl_verify | boolean | False | false | | If true, verifies the Redis Cluster server certificate when TLS is enabled. | ## Examples @@ -138,6 +152,67 @@ You should receive a response similar to the following: If the rate limiting quota of 300 prompt tokens has been consumed in a 30-second window, all additional requests will be rejected. +### Share Quotas Across Gateways with Redis + +By default, `ai-rate-limiting` keeps counters in the memory of each APISIX node. When you run multiple gateways or need quotas that survive restarts, switch the `policy` to `redis` or `redis-cluster` so every node consults the same Redis backend. You can also set `allow_degradation` to `true` to keep proxying even if Redis is temporarily unreachable. + +The following example builds on the previous Route and persists the counter to Redis with TLS enabled: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-rate-limiting-redis-route", + "uri": "/anything", + "methods": ["POST"], + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options": { + "model": "gpt-4o-mini", + "max_tokens": 256 + } + }, + "ai-rate-limiting": { + "limit": 1200, + "time_window": 60, + "policy": "redis", + "redis_host": "redis.internal", + "redis_port": 6380, + "redis_password": "'"$REDIS_PASSWORD"'", + "redis_ssl": true, + "redis_ssl_verify": true, + "allow_degradation": true + } + } + }' +``` + +To use Redis Cluster instead, set `"policy": "redis-cluster"` and configure the `redis_cluster_nodes`, `redis_cluster_name`, and optional TLS fields: + +```json +"ai-rate-limiting": { + "limit": 1200, + "time_window": 60, + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "10.0.0.10:6379", + "10.0.0.11:6379", + "10.0.0.12:6379" + ], + "redis_cluster_name": "apisix-rate-limit", + "redis_password": "my-secret-password", + "redis_cluster_ssl": true +} +``` + +With either Redis-backed policy, requests that spend tokens on one APISIX instance immediately affect the quota seen by all of the others. + ### Rate Limit One Instance Among Multiple The following example demonstrates how you can use `ai-proxy-multi` to configure two models for load balancing, forwarding 80% of the traffic to one instance and 20% to the other. Additionally, use `ai-rate-limiting` to configure token-based rate limiting on the instance that receives 80% of the traffic, such that when the configured quota is fully consumed, the additional traffic will be forwarded to the other instance. 
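The dry-run versus commit split in the new shared helper is easy to misread, so here is a minimal sketch of its expected behavior. This is illustrative only and not part of the patch: `red` stands for an already-connected handle from `apisix.utils.redis`, and the key and numbers are placeholders.

```lua
local util = require("apisix.plugins.limit-count.util")

-- state normally carried by a limit-count instance
local self = {plugin_name = "limit-count", limit = 10, window = 60}

-- commit = false: the Lua script is sent a cost of 0, so the counter is
-- read (and initialized with its TTL if absent) but never decremented;
-- the requested cost is subtracted locally only to decide acceptance.
local delay, remaining, ttl = util.redis_incoming(self, red, "route#1", false, 3)

-- commit = true: the shared counter is decremented by the requested cost,
-- so the spend is immediately visible to every other APISIX node.
delay, remaining, ttl = util.redis_incoming(self, red, "route#1", true, 3)
```

In both modes a negative result maps to `nil, "rejected", ttl`, which is what lets the request phase reject before proxying while the log-phase token accounting stays asynchronous.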
diff --git a/docs/zh/latest/plugins/ai-rate-limiting.md b/docs/zh/latest/plugins/ai-rate-limiting.md index e6c4ec7a86a5..f6091b22fb09 100644 --- a/docs/zh/latest/plugins/ai-rate-limiting.md +++ b/docs/zh/latest/plugins/ai-rate-limiting.md @@ -35,22 +35,36 @@ description: ai-rate-limiting 插件对发送到 LLM 服务的请求实施基于 ## 描述 -`ai-rate-limiting` 插件对发送到 LLM 服务的请求实施基于令牌的速率限制。它通过控制在指定时间范围内消耗的令牌数量来帮助管理 API 使用,确保公平的资源分配并防止服务过载。它通常与 [`ai-proxy`](ai-proxy.md) 或 [`ai-proxy-multi`](ai-proxy-multi.md) 插件一起使用。 +`ai-rate-limiting` 插件对发送到 LLM 服务的请求实施基于令牌的速率限制。它通过控制在指定时间范围内消耗的令牌数量来帮助管理 API 使用,确保公平的资源分配并防止服务过载。令牌计数器可以存储在每个 APISIX 节点的本地内存中,也可以持久化到 Redis/Redis 集群中,以便在多副本之间共享配额。它通常与 [`ai-proxy`](ai-proxy.md) 或 [`ai-proxy-multi`](ai-proxy-multi.md) 插件一起使用。 ## 属性 -| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | -|------------------------------|----------------|----------|----------|---------------------------------------------------------|-------------| -| limit | integer | 否 | | >0 | 在给定时间间隔内允许的最大令牌数。`limit` 和 `instances.limit` 中至少应配置一个。 | -| time_window | integer | 否 | | >0 | 与速率限制 `limit` 对应的时间间隔(秒)。`time_window` 和 `instances.time_window` 中至少应配置一个。 | -| show_limit_quota_header | boolean | 否 | true | | 如果为 true,则在响应中包含 `X-AI-RateLimit-Limit-*`、`X-AI-RateLimit-Remaining-*` 和 `X-AI-RateLimit-Reset-*` 头部,其中 `*` 是实例名称。 | +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|------------------------------|----------------|----------|--------------|---------------------------------------------------------|-------------| +| limit | integer | 否 | | >0 | 在给定时间间隔内允许的最大令牌数。`limit` 和 `instances.limit` 中至少应配置一个。 | +| time_window | integer | 否 | | >0 | 与速率限制 `limit` 对应的时间间隔(秒)。`time_window` 和 `instances.time_window` 中至少应配置一个。 | +| show_limit_quota_header | boolean | 否 | true | | 如果为 true,则在响应中包含 `X-AI-RateLimit-Limit-*`、`X-AI-RateLimit-Remaining-*` 和 `X-AI-RateLimit-Reset-*` 头部,其中 `*` 是实例名称。 | | limit_strategy | string | 否 | total_tokens | [total_tokens, prompt_tokens, completion_tokens] | 应用速率限制的令牌类型。`total_tokens` 是 `prompt_tokens` 和 `completion_tokens` 的总和。 | -| instances | array[object] | 否 | | | LLM 实例速率限制配置。 | -| instances.name | string | 是 | | | LLM 服务实例的名称。 | -| instances.limit | integer | 是 | | >0 | 实例在给定时间间隔内允许的最大令牌数。 | -| instances.time_window | integer | 是 | | >0 | 实例速率限制 `limit` 对应的时间间隔(秒)。 | -| rejected_code | integer | 否 | 503 | [200, 599] | 当超出配额的请求被拒绝时返回的 HTTP 状态码。 | -| rejected_msg | string | 否 | | | 当超出配额的请求被拒绝时返回的响应体。 | +| instances | array[object] | 否 | | | LLM 实例速率限制配置。 | +| instances.name | string | 是 | | | LLM 服务实例的名称。 | +| instances.limit | integer | 是 | | >0 | 实例在给定时间间隔内允许的最大令牌数。 | +| instances.time_window | integer | 是 | | >0 | 实例速率限制 `limit` 对应的时间间隔(秒)。 | +| rejected_code | integer | 否 | 503 | [200, 599] | 当超出配额的请求被拒绝时返回的 HTTP 状态码。 | +| rejected_msg | string | 否 | | | 当超出配额的请求被拒绝时返回的响应体。 | +| policy | string | 否 | local | [local, redis, redis-cluster] | 速率限制计数器的存储策略。将其设置为 `redis` 或 `redis-cluster` 可在多个 APISIX 节点之间共享配额或在重启后保留计数。 | +| allow_degradation | boolean | 否 | false | | 如果为 true,当 Redis 后端不可用时,仍允许 APISIX 继续转发请求。 | +| redis_host | string | 否 | | | Redis 节点地址。当 `policy` 为 `redis` 时必填。 | +| redis_port | integer | 否 | 6379 | >=1 | Redis 节点端口。当 `policy` 为 `redis` 时可选。 | +| redis_username | string | 否 | | | 使用 Redis ACL 时的用户名,`policy` 为 `redis` 时可选。 | +| redis_password | string | 否 | | | Redis 节点密码,`policy` 为 `redis` 或 `redis-cluster` 时可选。 | +| redis_database | integer | 否 | 0 | >=0 | Redis 数据库编号,`policy` 为 `redis` 时可选。 | +| redis_timeout | integer | 否 | 1000 | >=1 | Redis 操作超时时间(毫秒),`policy` 为 `redis` 或 `redis-cluster` 时可选。 | +| 
redis_ssl | boolean | 否 | false | | 如果为 true，则在 `policy=redis` 时通过 TLS 连接 Redis。 |
+| redis_ssl_verify | boolean | 否 | false | | 如果为 true，则在 `policy=redis` 且启用 TLS 时校验证书。 |
+| redis_cluster_nodes | array[string] | 否 | | | Redis 集群节点列表（例如 `["10.0.0.1:6379","10.0.0.2:6379"]`）。当 `policy` 为 `redis-cluster` 时必填。 |
+| redis_cluster_name | string | 否 | | | Redis 集群名称，当 `policy` 为 `redis-cluster` 时必填。 |
+| redis_cluster_ssl | boolean | 否 | false | | 如果为 true，则在 `policy=redis-cluster` 时通过 TLS 连接 Redis 集群。 |
+| redis_cluster_ssl_verify | boolean | 否 | false | | 如果为 true，则在 `policy=redis-cluster` 且启用 TLS 时校验证书。 |
 
 ## 示例
 
@@ -138,6 +152,67 @@ curl "http://127.0.0.1:9080/anything" -X POST \
 
 如果在 30 秒窗口内消耗了 300 个提示令牌的速率限制配额，所有额外的请求将被拒绝。
 
+### 使用 Redis 在网关之间共享配额
+
+默认情况下，`ai-rate-limiting` 会把计数器保存在每个 APISIX 节点的内存中。当集群中存在多个网关副本，或希望配额在重启后继续生效时，可以将 `policy` 设置为 `redis` 或 `redis-cluster`，让所有节点都连接到同一个 Redis 后端。您还可以把 `allow_degradation` 设置为 `true`，在 Redis 暂时不可用时继续转发业务流量。
+
+以下示例基于上一节的路由，在启用 TLS 的情况下把计数器持久化到 Redis：
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-rate-limiting-redis-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy": {
+        "provider": "openai",
+        "auth": {
+          "header": {
+            "Authorization": "Bearer '"$OPENAI_API_KEY"'"
+          }
+        },
+        "options": {
+          "model": "gpt-4o-mini",
+          "max_tokens": 256
+        }
+      },
+      "ai-rate-limiting": {
+        "limit": 1200,
+        "time_window": 60,
+        "policy": "redis",
+        "redis_host": "redis.internal",
+        "redis_port": 6380,
+        "redis_password": "'"$REDIS_PASSWORD"'",
+        "redis_ssl": true,
+        "redis_ssl_verify": true,
+        "allow_degradation": true
+      }
+    }
+  }'
+```
+
+如果要使用 Redis 集群，只需将 `policy` 设置为 `"redis-cluster"`，并补充 `redis_cluster_nodes`、`redis_cluster_name` 以及可选的 TLS 配置：
+
+```json
+"ai-rate-limiting": {
+  "limit": 1200,
+  "time_window": 60,
+  "policy": "redis-cluster",
+  "redis_cluster_nodes": [
+    "10.0.0.10:6379",
+    "10.0.0.11:6379",
+    "10.0.0.12:6379"
+  ],
+  "redis_cluster_name": "apisix-rate-limit",
+  "redis_password": "my-secret-password",
+  "redis_cluster_ssl": true
+}
+```
+
+无论选择哪种 Redis 存储策略，在一个 APISIX 实例上消耗的令牌都会立即同步到其他实例。
+
 ### 对多个实例中的一个进行速率限制
 
 以下示例演示了如何使用 `ai-proxy-multi` 配置两个模型进行负载均衡，将 80% 的流量转发到一个实例，20% 转发到另一个实例。此外，使用 `ai-rate-limiting` 对接收 80% 流量的实例配置基于令牌的速率限制，这样当配置的配额完全消耗时，额外的流量将被转发到另一个实例。
diff --git a/t/lib/test_redis.lua b/t/lib/test_redis.lua
new file mode 100644
index 000000000000..a7115e9f5d50
--- /dev/null
+++ b/t/lib/test_redis.lua
@@ -0,0 +1,127 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx = ngx
+local redis = require "resty.redis"
+
+local ipairs = ipairs
+local tonumber = tonumber
+local type = type
+local os_getenv = os.getenv
+
+local _M = {}
+
+local DEFAULT_PORTS = {6379, 5000, 5001, 5002, 5003, 5004, 5005, 5006}
+
+local function log_warn(...)
+    if ngx then
+        ngx.log(ngx.WARN, ...)
+    end
+end
+
+local function add_port(target, visited, port)
+    port = tonumber(port)
+    if port and not visited[port] then
+        visited[port] = true
+        target[#target + 1] = port
+    end
+end
+
+local function flush_single(host, port, opts)
+    local red = redis:new()
+    local connect_timeout = opts.connect_timeout or 1000
+    local send_timeout = opts.send_timeout or connect_timeout
+    local read_timeout = opts.read_timeout or connect_timeout
+    red:set_timeouts(connect_timeout, send_timeout, read_timeout)
+
+    local ok, err = red:connect(host, port)
+    if not ok then
+        log_warn("failed to connect to redis ", host, ":", port, ": ", err)
+        return nil, err
+    end
+
+    local _, flush_err = red:flushall()
+    if flush_err then
+        log_warn("failed to flush redis ", host, ":", port, ": ", flush_err)
+    end
+
+    local keepalive_pool = opts.keepalive_pool
+    if keepalive_pool == nil then
+        keepalive_pool = 0
+    end
+    if keepalive_pool == 0 then
+        local ok_close, close_err = red:close()
+        if not ok_close then
+            log_warn("failed to close redis connection ", host, ":", port, ": ", close_err)
+        end
+    else
+        local keepalive_timeout = opts.keepalive_timeout or 10000
+        local ok_keepalive, keepalive_err = red:set_keepalive(keepalive_timeout, keepalive_pool)
+        if not ok_keepalive then
+            log_warn("failed to set keepalive for redis ", host, ":", port, ": ", keepalive_err)
+        end
+    end
+
+    return true
+end
+
+function _M.flush_all(opts)
+    opts = opts or {}
+    local host = opts.host or "127.0.0.1"
+
+    local visited = {}
+    local ports = {}
+
+    local source_ports = opts.ports or DEFAULT_PORTS
+    for _, port in ipairs(source_ports) do
+        add_port(ports, visited, port)
+    end
+
+    add_port(ports, visited, os_getenv("TEST_NGINX_REDIS_PORT"))
+
+    if opts.extra_ports then
+        for _, port in ipairs(opts.extra_ports) do
+            add_port(ports, visited, port)
+        end
+    end
+
+    for _, port in ipairs(ports) do
+        flush_single(host, port, opts)
+    end
+end
+
+function _M.flush_port(host, port, opts)
+    if type(host) == "table" then
+        opts = host
+        host = opts.host or "127.0.0.1"
+        port = opts.port
+    end
+
+    opts = opts or {}
+    host = host or opts.host or "127.0.0.1"
+    port = port or opts.port
+    if not port then
+        return nil, "port is required"
+    end
+
+    return flush_single(host, port, opts)
+end
+
+_M.default_ports = DEFAULT_PORTS
+
+return _M
+
diff --git a/t/plugin/ai-rate-limiting.t b/t/plugin/ai-rate-limiting.t
index 66aa0b07f164..16c4961572c5 100644
--- a/t/plugin/ai-rate-limiting.t
+++ b/t/plugin/ai-rate-limiting.t
@@ -984,3 +984,223 @@ passed
 Authorization: Bearer token
 --- error_code eval
 [200, 200, 200, 200, 200, 200, 200, 503, 503]
+
+
+
+=== TEST 21: set route with Redis policy
+--- config
+    location /t {
+        content_by_lua_block {
+            require("lib.test_redis").flush_all()
+
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "uri": "/ai",
+                    "plugins": {
+                        "ai-proxy": {
+                            "provider": "openai",
+                            "auth": {
+                                "header": {
+                                    "Authorization": "Bearer token"
+                                }
+                            },
+                            "options": {
+                                "model": "gpt-4"
+                            },
+                            "override": {
+                                "endpoint": "http://localhost:16724"
+                            },
+                            "ssl_verify": false
+                        },
+                        "ai-rate-limiting": {
+                            "limit": 30,
+                            "time_window": 60,
+                            "policy": "redis",
"redis_host": "127.0.0.1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: reject the 4th request with Redis policy +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 23: set rejected_code to 403, rejected_msg to "rate limit exceeded" with Redis +--- config + location /t { + content_by_lua_block { + require("lib.test_redis").flush_all() + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4" + }, + "override": { + "endpoint": "http://localhost:16724" + }, + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 30, + "time_window": 60, + "rejected_code": 403, + "rejected_msg": "rate limit exceeded", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 24: check code and message with Redis +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 403] +--- response_body eval +[ + qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/, + qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/, + qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/, + qr/\{"error_msg":"rate limit exceeded"\}/, +] + + + +=== TEST 25: set route with Redis Cluster policy +--- config + location /t { + content_by_lua_block { + require("lib.test_redis").flush_all() + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4" + }, + "override": { + "endpoint": "http://localhost:16724" + }, + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 30, + "time_window": 60, + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 26: reject request with Redis Cluster policy +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 503] diff --git a/t/plugin/limit-conn-redis-cluster.t b/t/plugin/limit-conn-redis-cluster.t index 997a4a2a7087..d99e90fba933 100644 --- a/t/plugin/limit-conn-redis-cluster.t +++ b/t/plugin/limit-conn-redis-cluster.t @@ -51,6 +51,7 @@ add_block_preprocessor(sub { location /test_concurrency { content_by_lua_block { + require("lib.test_redis").flush_all() local reqs = {} local status_map = {} for i = 1, 10 do @@ -83,6 +84,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local plugin = require("apisix.plugins.limit-conn") local ok, err = plugin.check_schema({ conn = 1, @@ -117,6 +119,7 @@ done --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -173,6 +176,7 @@ status:200, count:10 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -229,6 +233,7 @@ status:503, count:7 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -285,6 +290,7 @@ status:503, count:4 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/plugin/limit-count-redis-cluster.t b/t/plugin/limit-count-redis-cluster.t index 7a4798a60e48..6308f019abb2 100644 --- a/t/plugin/limit-count-redis-cluster.t +++ b/t/plugin/limit-count-redis-cluster.t @@ -42,6 +42,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -81,6 +82,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -125,6 +127,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -193,6 +196,7 @@ unlock with key route#1#redis-cluster --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -237,6 +241,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test for i = 1, 20 do local code, body = t('/hello', ngx.HTTP_GET) @@ -274,6 +279,7 @@ code: 200 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local function set_route(count) t('/apisix/admin/routes/1', @@ -336,6 +342,7 @@ code: 503 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -391,6 +398,7 
@@ connection refused --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -438,6 +446,7 @@ connection refused --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -495,6 +504,7 @@ failed to do ssl handshake --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/plugin/limit-count-redis-cluster2.t b/t/plugin/limit-count-redis-cluster2.t index ede8ee9dcc82..4bf43f2f36f2 100644 --- a/t/plugin/limit-count-redis-cluster2.t +++ b/t/plugin/limit-count-redis-cluster2.t @@ -42,6 +42,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -94,6 +95,7 @@ Done --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/plugin/limit-count-redis-cluster3.t b/t/plugin/limit-count-redis-cluster3.t index 2a599184cd75..5801de327846 100644 --- a/t/plugin/limit-count-redis-cluster3.t +++ b/t/plugin/limit-count-redis-cluster3.t @@ -55,6 +55,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local conf = { redis_cluster_nodes = {"127.0.0.1:5000", "127.0.0.1:5001"}, redis_cluster_name = "redis-cluster-1", @@ -88,6 +89,7 @@ remaining: 1 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -139,6 +141,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/plugin/limit-count-redis.t b/t/plugin/limit-count-redis.t index d06188050df2..74d38bee0844 100644 --- a/t/plugin/limit-count-redis.t +++ b/t/plugin/limit-count-redis.t @@ -51,6 +51,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -90,6 +91,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -131,6 +133,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -195,6 +198,7 @@ unlock with key route#1#redis --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test -- set redis password local redis = require "resty.redis" @@ -286,6 +290,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -348,6 +353,7 @@ failed to limit count: WRONGPASS 
invalid username-password pair or user is disab --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', @@ -402,6 +408,7 @@ failed to limit count: WRONGPASS invalid username-password pair or user is disab --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', @@ -456,6 +463,7 @@ failed to limit count: WRONGPASS invalid username-password pair or user is disab --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', @@ -516,6 +524,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test -- set redis password local redis = require "resty.redis" diff --git a/t/plugin/limit-count-redis2.t b/t/plugin/limit-count-redis2.t index f4f4ea13113d..535bb135dd3a 100644 --- a/t/plugin/limit-count-redis2.t +++ b/t/plugin/limit-count-redis2.t @@ -51,6 +51,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -92,6 +93,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -144,6 +146,7 @@ failed to limit count: failed to change redis db, err: ERR DB index is out of ra --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -194,6 +197,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -247,6 +251,7 @@ connection refused --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/plugin/limit-count-redis3.t b/t/plugin/limit-count-redis3.t index bf952df1863b..246c1d6b3dc3 100644 --- a/t/plugin/limit-count-redis3.t +++ b/t/plugin/limit-count-redis3.t @@ -51,6 +51,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -91,6 +92,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local json = require "t.toolkit.json" local http = require "resty.http" local uri = "http://127.0.0.1:" .. ngx.var.server_port @@ -125,6 +127,7 @@ Done --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -165,6 +168,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local json = require "t.toolkit.json" local http = require "resty.http" local uri = "http://127.0.0.1:" .. 
ngx.var.server_port @@ -193,6 +197,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', @@ -253,6 +258,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', @@ -308,6 +314,7 @@ failed to do ssl handshake --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -348,6 +355,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local json = require "t.toolkit.json" local http = require "resty.http" local uri = "http://127.0.0.1:" .. ngx.var.server_port diff --git a/t/plugin/limit-count-redis4.t b/t/plugin/limit-count-redis4.t index d1fad198055d..c4ee1b459e2f 100644 --- a/t/plugin/limit-count-redis4.t +++ b/t/plugin/limit-count-redis4.t @@ -62,6 +62,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local conf = { allow_degradation = false, rejected_code = 503, @@ -94,6 +95,7 @@ remaining: 1 --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git a/t/plugin/limit-req-redis-cluster.t b/t/plugin/limit-req-redis-cluster.t index 4c36c2200294..db68994d7c0f 100644 --- a/t/plugin/limit-req-redis-cluster.t +++ b/t/plugin/limit-req-redis-cluster.t @@ -39,6 +39,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local plugin = require("apisix.plugins.limit-req") local ok, err = plugin.check_schema({ rate = 1, @@ -66,6 +67,7 @@ done --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -131,6 +133,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -184,6 +187,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -230,6 +234,7 @@ GET /t --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -271,6 +276,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -317,6 +323,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -362,6 +369,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers', ngx.HTTP_PUT, @@ -404,6 +412,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = 
require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -449,6 +458,7 @@ apikey: auth-jack --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers', ngx.HTTP_PUT, @@ -491,7 +501,7 @@ passed --- more_headers apikey: auth-jack --- error_code eval -[403, 403, 403, 403] +[200, 403, 403, 403] @@ -499,6 +509,7 @@ apikey: auth-jack --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -554,6 +565,7 @@ The value of the configured key is empty, use client IP instead --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers/new_consumer', ngx.HTTP_DELETE) @@ -572,6 +584,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) @@ -590,6 +603,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local plugin = require("apisix.plugins.limit-req") local ok, err = plugin.check_schema({rate = 0, burst = 0, rejected_code = 503, key = 'remote_addr'}) if not ok then diff --git a/t/plugin/limit-req-redis.t b/t/plugin/limit-req-redis.t index 84664b7a2db2..89340595ba21 100644 --- a/t/plugin/limit-req-redis.t +++ b/t/plugin/limit-req-redis.t @@ -39,6 +39,7 @@ add_block_preprocessor(sub { my $config = $block->config // <<_EOC_; location /access_root_dir { content_by_lua_block { + require("lib.test_redis").flush_all() local httpc = require "resty.http" local hc = httpc:new() @@ -62,6 +63,7 @@ __DATA__ --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local plugin = require("apisix.plugins.limit-req") local ok, err = plugin.check_schema({ rate = 1, @@ -89,6 +91,7 @@ done --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -147,6 +150,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -199,6 +203,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -252,6 +257,7 @@ failed to limit req: WRONGPASS invalid username-password pair or user is disable --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -293,6 +299,7 @@ GET /t --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -334,6 +341,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -377,6 +385,7 @@ passed --- config location /t { content_by_lua_block { + 
require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -419,6 +428,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers', ngx.HTTP_PUT, @@ -458,6 +468,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -503,6 +514,7 @@ apikey: auth-jack --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers', ngx.HTTP_PUT, @@ -542,7 +554,7 @@ passed --- more_headers apikey: auth-jack --- error_code eval -[403, 403, 403, 403] +[200, 403, 403, 403] @@ -550,6 +562,7 @@ apikey: auth-jack --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -602,6 +615,7 @@ The value of the configured key is empty, use client IP instead --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers/new_consumer', ngx.HTTP_DELETE) @@ -620,6 +634,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) @@ -638,6 +653,7 @@ passed --- config location /t { content_by_lua_block { + require("lib.test_redis").flush_all() local plugin = require("apisix.plugins.limit-req") local ok, err = plugin.check_schema({rate = 0, burst = 0, rejected_code = 503, key = 'remote_addr'}) if not ok then