feat: support expiring prometheus metrics (#10869)

diff --git a/apisix-master-0.rockspec b/apisix-master-0.rockspec
index 7ad0d72..90327d6 100644
--- a/apisix-master-0.rockspec
+++ b/apisix-master-0.rockspec
@@ -51,7 +51,7 @@
     "lua-resty-openidc = 1.7.6-3",
     "luafilesystem = 1.7.0-2",
     "api7-lua-tinyyaml = 0.4.4",
-    "nginx-lua-prometheus = 0.20230607-1",
+    "nginx-lua-prometheus-api7 = 0.20240201-1",
     "jsonschema = 0.9.8",
     "lua-resty-ipmatcher = 0.6.1",
     "lua-resty-kafka = 0.22-0",
diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua
index c15c5d0..d04d9bb 100644
--- a/apisix/plugins/prometheus/exporter.lua
+++ b/apisix/plugins/prometheus/exporter.lua
@@ -132,6 +132,11 @@
         metric_prefix = attr.metric_prefix
     end
 
+    local exptime
+    if attr and attr.expire then
+        exptime = attr.expire
+    end
+
     prometheus = base_prometheus.init("prometheus-metrics", metric_prefix)
 
     metrics.connections = prometheus:gauge("nginx_http_current_connections",
@@ -144,7 +149,6 @@
     metrics.etcd_reachable = prometheus:gauge("etcd_reachable",
             "Config server etcd reachable from APISIX, 0 is unreachable")
 
-
     metrics.node_info = prometheus:gauge("node_info",
             "Info of APISIX node",
             {"hostname"})
@@ -163,7 +167,8 @@
 
     metrics.upstream_status = prometheus:gauge("upstream_status",
             "Upstream status from health check",
-            {"name", "ip", "port"})
+            {"name", "ip", "port"},
+            exptime)
 
     -- per service
 
@@ -173,7 +178,8 @@
     metrics.status = prometheus:counter("http_status",
             "HTTP status codes per service in APISIX",
             {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node",
-            unpack(extra_labels("http_status"))})
+            unpack(extra_labels("http_status"))},
+            exptime)
 
     local buckets = DEFAULT_BUCKETS
     if attr and attr.default_buckets then
@@ -183,11 +189,12 @@
     metrics.latency = prometheus:histogram("http_latency",
         "HTTP request latency in milliseconds per service in APISIX",
         {"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))},
-        buckets)
+        buckets, exptime)
 
     metrics.bandwidth = prometheus:counter("bandwidth",
             "Total bandwidth in bytes consumed per service in APISIX",
-            {"type", "route", "service", "consumer", "node", unpack(extra_labels("bandwidth"))})
+            {"type", "route", "service", "consumer", "node", unpack(extra_labels("bandwidth"))},
+            exptime)
 
     if prometheus_enabled_in_stream then
         init_stream_metrics()
diff --git a/conf/config-default.yaml b/conf/config-default.yaml
index 3ba8a4e..f2064aa 100755
--- a/conf/config-default.yaml
+++ b/conf/config-default.yaml
@@ -602,6 +602,9 @@
     #   - 100
     #   - 200
     #   - 500
+    # expire: 0                       # The amount of time, in seconds, after which inactive metrics are removed.
+                                      # 0 means the metrics will never expire.
+                                      # If you need expiration, a value of 600 (10 minutes) is recommended.
   server-info:                        # Plugin: server-info
     report_ttl: 60                    # Set the TTL in seconds for server info in etcd.
                                       # Maximum: 86400. Minimum: 3.
diff --git a/t/plugin/prometheus4.t b/t/plugin/prometheus4.t
index 2a72736..93c6f0d 100644
--- a/t/plugin/prometheus4.t
+++ b/t/plugin/prometheus4.t
@@ -189,3 +189,91 @@
 apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="105"\} \d+
 apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="205"\} \d+
 apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="505"\} \d+/
+
+
+
+=== TEST 9: set route with prometheus ttl
+--- yaml_config
+plugin_attr:
+    prometheus:
+        default_buckets:
+            - 15
+            - 55
+            - 105
+            - 205
+            - 505
+        expire: 1
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+
+            local code = t('/apisix/admin/routes/metrics',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "public-api": {}
+                    },
+                    "uri": "/apisix/prometheus/metrics"
+                }]]
+                )
+            if code >= 300 then
+                ngx.status = code
+                return
+            end
+
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "prometheus": {}
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello1"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+                ngx.say(body)
+                return
+            end
+
+            local code, body = t('/hello1',
+                ngx.HTTP_GET,
+                "",
+                nil,
+                nil
+            )
+
+            if code >= 300 then
+                ngx.status = code
+                ngx.say(body)
+                return
+            end
+
+            ngx.sleep(2)
+
+            local code, pass, body = t('/apisix/prometheus/metrics',
+                ngx.HTTP_GET,
+                "",
+                nil,
+                nil
+            )
+            ngx.status = code
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body_unlike eval
+qr/apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="15"\} \d+
+apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="55"\} \d+
+apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="105"\} \d+
+apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="205"\} \d+
+apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="505"\} \d+/