# blob ff07b6a8c93c4ce7983b1909c7cda32e7d269734 [file] [log] [blame]
# (Git web-UI residue from the page this file was copied from; kept as a
# comment so the file remains valid YAML.)
## Common naming overrides consumed by the chart's helper templates.
## fullnameOverride String to fully override common.names.fullname template
##
##
fullnameOverride: ""
##
##
## nameOverride String to partially override common.names.fullname template (will maintain the release name)
##
nameOverride: ""
##
##
## namespaceOverride String to partially override common.names.namespace template
##
namespaceOverride: ""
##
##
## labels String to override common.names.labels template
## NOTE(review): an empty *string* (not {}) — presumably rendered through a
## template helper; confirm against the chart templates before changing.
##
labels: ""
##
##
## annotations String to override common.names.annotations template
## NOTE(review): empty string, same caveat as `labels` above.
##
annotations: ""
##
## Parameters shared by this chart and any sub-charts.
## global.imageRegistry Global Docker image registry
## global.imagePullSecrets Global Docker registry secret names as an array
## global.storageClass Global StorageClass for Persistent Volume(s)
##
global:
  imageRegistry: ""
  ## E.g.
  ## imagePullSecrets:
  ##   - myRegistryKeySecretName
  ##
  imagePullSecrets: []
## RBAC resources created for dubbo-admin.
rbac:
  enabled: true
  # Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
  pspEnabled: true
  pspUseAppArmor: true
  namespaced: false
  extraRoleRules: []
  # NOTE(review): useExistingRole is conventionally the *name* of an existing
  # role (a string); [] is kept here to preserve current rendered behavior —
  # confirm against the templates before changing.
  useExistingRole: []
  # - apiGroups: []
  #   resources: []
  #   verbs: []
  extraClusterRoleRules: []
  # - apiGroups: []
  #   resources: []
  #   verbs: []
## ServiceAccount used by the dubbo-admin pods.
serviceAccount:
  enabled: true
  ## ServiceAccount name.
  ## NOTE(review): declared as a map ({}); presumably templated into a string
  ## by the chart helpers — confirm before tightening to "".
  name: {}
  ## ServiceAccount name used by the test pods.
  nameTest: {}
  ## ServiceAccount labels.
  labels: {}
  ## ServiceAccount annotations.
  annotations: {}
## Number of dubbo-admin replicas (ignored when autoscaling is enabled).
replicas: 1
# -- Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName
## Create a headless service for the deployment
headlessService: false
## Create HorizontalPodAutoscaler object for deployment type
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 5
  targetCPU: "60"
  targetMemory: ""
  behavior: {}
## Number of old ReplicaSets to retain
##
revisionHistoryLimit: 10
## See `kubectl explain deployment.spec.strategy` for more
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
  type: RollingUpdate
## Readiness probe for the dubbo-admin container.
readinessProbe:
  httpGet:
    path: /
    port: 8080
  ## Initial delay seconds for readinessProbe
  initialDelaySeconds: 60
  ## Timeout seconds for readinessProbe
  timeoutSeconds: 30
  ## Period seconds for readinessProbe
  periodSeconds: 10
  ## Success threshold for readinessProbe
  successThreshold: 1
  ## Failure threshold for readinessProbe
  failureThreshold: 3
  ## NOTE(review): probeCommandTimeout is not a standard Kubernetes probe
  ## field; presumably consumed by the chart templates — confirm.
  probeCommandTimeout: 1
##
## Liveness probe for the dubbo-admin container.
livenessProbe:
  httpGet:
    path: /
    port: 8080
  ## Initial delay seconds for livenessProbe
  initialDelaySeconds: 60
  ## Timeout seconds for livenessProbe
  timeoutSeconds: 30
  ## Period seconds for livenessProbe
  periodSeconds: 10
  ## Success threshold for livenessProbe
  successThreshold: 1
  ## Failure threshold for livenessProbe
  failureThreshold: 3
  ## NOTE(review): probeCommandTimeout is not a standard Kubernetes probe
  ## field; presumably consumed by the chart templates — confirm.
  probeCommandTimeout: 1
## Startup probe for the dubbo-admin container.
startupProbe:
  httpGet:
    path: /
    port: 8080
  ## Initial delay seconds for startupProbe
  initialDelaySeconds: 60
  ## Timeout seconds for startupProbe
  timeoutSeconds: 30
  ## Period seconds for startupProbe
  periodSeconds: 10
  ## Success threshold for startupProbe
  successThreshold: 1
  ## Failure threshold for startupProbe
  failureThreshold: 3
## dubbo-admin container image.
image:
  registry: docker.io
  ## e.g registry.k8s.io
  ##
  repository: apache/dubbo-admin
  ## Image tag (quoted so YAML never re-types it).
  tag: "0.5.0"
  debug: false
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Can be templated.
  ##
  pullSecrets: []
  # - RegisterKeySecretName
## Configure Pods Security Context.
## Was a bare `securityContext:` (parses as null); an explicit empty map is
## what templates rendering it with toYaml expect.
securityContext: {}
# runAsUser: 570
# runAsGroup: 570
# fsGroup: 570
## Helm test pod configuration.
tests:
  enabled: true
  image: busybox
  tag: ""
  imagePullPolicy: IfNotPresent
  securityContext: {}
## Init container Security Context
containerSecurityContext:
  enabled: false
## Sensible environment variables that will be rendered as new secret object
## This can be useful for auth tokens, etc
envSecret: {}
# -- `minReadySeconds` to avoid killing pods before we are ready
##
minReadySeconds: 0
# -- Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
## Create or not configmap
ConfigmapEnabled: true
## configmap mounts
ConfigmapMounts: []
# - name: configMap-file
#   mountPath: /config
#   configMap: configMap-file
#   readOnly: true
## secret mounts
## Was a bare `SecretMounts:` (parses as null); an explicit empty list keeps
## templates that range over it working.
SecretMounts: []
# - name: secret-file
#   secret:
#     secretName: secret-file
## extraSecret Mounts
extraSecretMounts: []
# - name: secret-files
#   mountPath: /etc/secrets
#   secretName: secret-files
#   readOnly: true
#   subPath: ""
## emptyDir mounts
EmptyDirMounts: []
# - name: ""
#   mountPath: /
## Apply extra labels.
extraLabels: {}
## Assign a PriorityClassName to pods if set
# priorityClassName: {}
## Pod Annotations
# podAnnotations: {}
## Pod Labels
# podLabels: {}
## Expose the dubbo-admin service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  ## Service port name.
  name: http
  ## Service enable true or false.
  enabled: true
  ## Service type.
  type: ClusterIP
  ## Service clusterIP.
  clusterIP: ""
  ## Service loadBalancerIP.
  loadBalancerIP: ""
  ## Service loadBalancerSourceRanges.
  ## NOTE(review): the Kubernetes field is a list of CIDRs; "" is kept to
  ## preserve current rendered behavior — confirm against the templates.
  loadBalancerSourceRanges: ""
  ## Service externalIPs.
  externalIPs: ""
  ## Service nodePort.
  nodePort: ""
  ## Service path.
  path: /
  ## Service port.
  port: 38080
  ## Service targetPort.
  targetPort: http
  ## Service containerPort.
  containerPort: 8080
  ## Service protocol.
  protocol: TCP
  ## Service annotations. Can be templated.
  annotations: {}
  ## Service labels.
  labels: {}
  ## Service portName.
  portName: service
  ## Service appProtocol.
  appProtocol: ""
  ## ZooKeeper service ports.
  ##
  ports:
    client: 2181
    follower: 2888
    election: 3888
  ## Node ports to expose
  ## NOTE: choose port between <30000-32767>
  ## @param service.nodePorts.client Node port for clients
  ## @param service.nodePorts.tls Node port for TLS
  ##
  nodePorts:
    client: ""
  ## @param service.disableBaseClientPort Remove client port from service definitions.
  ##
  disableBaseClientPort: false
  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
  ## Values: ClientIP or None
  ## ref: https://kubernetes.io/docs/user-guide/services/
  ##
  sessionAffinity: None
  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
  ## sessionAffinityConfig:
  ##   clientIP:
  ##     timeoutSeconds: 300
  ##
  sessionAffinityConfig: {}
  ## External traffic policy for LoadBalancer/NodePort services.
  externalTrafficPolicy: Cluster
  ## Extra ports appended to the service definition.
  extraPorts: []
  ## @param service.headless.annotations Annotations for the Headless Service
  ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods
  ## @param service.headless.servicenameOverride String to partially override headless service name
  ##
  headless:
    publishNotReadyAddresses: true
    annotations: {}
    servicenameOverride: ""
## Ingress for the dubbo-admin service.
ingress:
  enabled: false
  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  # ingressClassName: nginx
  # Values can be templated
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  labels: {}
  path: /
  # pathType is only for k8s >= 1.18
  pathType: Prefix
  hosts:
    - chart-example.local
  ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
  extraPaths: []
  # - path: /*
  #   backend:
  #     serviceName: ssl-redirect
  #     servicePort: use-annotation
  ## Or for k8s > 1.19
  # - path: /*
  #   pathType: Prefix
  #   backend:
  #     service:
  #       name: ssl-redirect
  #       port:
  #         name: use-annotation
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local
## Pod resource requests and limits for the dubbo-admin container.
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
## NOTE(review): non-conventional key name (`extraresources`); presumably
## consumed by the chart templates for an auxiliary container — confirm.
extraresources: {}
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Topology Spread Constraints
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
topologySpreadConstraints: []
## Additional init containers (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
##
InitContainers: []
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
  enabled: false
  type: pvc
  storageClassName: ""
  accessModes:
    - ReadWriteOnce
  size: 10Gi
  labels: {}
  annotations: {}
  existingClaim: ""
  finalizers:
    - kubernetes.io/pvc-protection
  selectorLabels: {}
  ## Sub-directory of the PV to mount. Can be templated.
  # subPath: ""
  ## Name of an existing PVC. Can be templated.
  ClaimName: {}
  ## Extra labels to apply to a PVC.
  extraPvcLabels: {}
  ## If persistence is not enabled, this allows to mount the
  ## local storage in-memory to improve performance
  ##
  inMemory:
    enabled: false
    ## The maximum usage on memory medium EmptyDir would be
    ## the minimum value between the SizeLimit specified
    ## here and the sum of memory limits of all containers in a pod
    ##
    # sizeLimit: 300Mi
  emptyDir:
    ## dubbo-admin emptyDir volume size limit
    ##
    sizeLimit: ""
  selector: {}
  ## Persistence for a dedicated data log directory
  ##
  dataLogDir:
    ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory
    ##
    size: 8Gi
    ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory
    ## If defined, PVC must be created manually before volume will be bound
    ## The value is evaluated as a template
    ##
    existingClaim: ""
    ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC
    ## If set, the PVC can't have a PV dynamically provisioned for it
    ## E.g.
    ## selector:
    ##   matchLabels:
    ##     app: my-app
    ##
    selector: {}
## Init container that resets data ownership at startup.
initChown:
  ## If false, data ownership will not be reset at startup
  ## This allows the dubbo-admin-server to be run with an arbitrary user
  ##
  enabled: true
  ## initChownData container image
  ##
  image:
    repository: busybox
    tag: "1.31.1"
    sha: ""
    pullPolicy: IfNotPresent
  ## initChown resource requests and limits
  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  ## Runs as root so it can chown the data directory.
  securityContext:
    runAsNonRoot: false
    runAsUser: 0
## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
lifecycleHooks: {}
# postStart:
#   exec:
#     command: []
# preStop:
#   exec:
#     command: []
## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget:
  enabled: false
  minAvailable: 1
  # maxUnavailable: 1
networkPolicy:
  ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
  ##
  enabled: false
  ## @param networkPolicy.ingress When true enables the creation
  ## an ingress network policy
  ##
  ingress: true
  ## @param networkPolicy.allowExternal Don't require client label for connections
  ## The Policy model to apply. When set to false, only pods with the correct
  ## client label will have network access to dubbo-admin port defined.
  ## When true, dubbo-admin will accept connections from any source
  ## (with the correct destination port).
  ##
  allowExternal: true
  ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
  ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
  ## and that match other criteria, the ones that have the good label, can reach the dubbo-admin.
  ## But sometimes, we want the dubbo-admin to be accessible to clients from other namespaces, in this case, we can use this
  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
  ##
  ## Example:
  ## explicitNamespacesSelector:
  ##   matchLabels:
  ##     role: frontend
  ##   matchExpressions:
  ##     - {key: role, operator: In, values: [frontend]}
  ##
  explicitNamespacesSelector: {}
  egress:
    ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
    ## created allowing dubbo-admin to connect to external data sources from kubernetes cluster.
    enabled: false
    ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
    ports: []
    ## Add ports to the egress by specifying - port: <port number>
    ## E.g.
    ## ports:
    ##   - port: 80
    ##   - port: 443
## Zookeeper Necessary configuration
zookeeper:
  name: zookeeper
  ## Deploy the bundled ZooKeeper (true/false).
  enabled: false
  ## replicas
  replicas: 1
  ## Create a headless service for the deployment
  headlessService: false
  ## ZooKeeper container image.
  image:
    repository: bitnami/zookeeper
    tag: 3.8.1-debian-11-r0
    digest: ""
    debug: false
    pullPolicy: IfNotPresent
  ## Create HorizontalPodAutoscaler object for deployment type
  autoscaling:
    enabled: false
    minReplicas: 1
    maxReplicas: 5
    targetCPU: "60"
    targetMemory: ""
    behavior: {}
  ## Number of old ReplicaSets to retain
  ##
  revisionHistoryLimit: 10
  ## See `kubectl explain deployment.spec.strategy` for more
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  deploymentStrategy:
    type: RollingUpdate
  ## zookeeper container Ports
  containerPorts:
    client: 2181
    follower: 2888
    election: 3888
  extraVolumes: []
  ## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s)
  ## Example Use Case: mount certificates to enable TLS
  ## e.g:
  ## extraVolumeMounts:
  ##   - name: zookeeper-keystore
  ##     mountPath: /certs/keystore
  ##     readOnly: true
  ##   - name: zookeeper-truststore
  ##     mountPath: /certs/truststore
  ##     readOnly: true
  ##
  extraVolumeMounts: []
  ## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s)
  ## e.g:
  ## sidecars:
  ##   - name: your-image-name
  ##     image: your-image
  ##     imagePullPolicy: Always
  ##     ports:
  ##       - name: portname
  ##         containerPort: 1234
  ##
  auth:
    client:
      ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5
      ##
      enabled: false
      ## @param auth.client.clientUser User that will use ZooKeeper clients to auth
      ##
      clientUser: ""
      ## @param auth.client.clientPassword Password that will use ZooKeeper clients to auth
      ##
      clientPassword: ""
      ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of user to be created
      ## Specify them as a string, for example: "user1,user2,admin"
      ##
      serverUsers: ""
      ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
      ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
      ##
      serverPasswords: ""
      ## @param auth.client.existingSecret Use existing secret (ignores previous passwords)
      ##
      existingSecret: ""
    quorum:
      ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5
      ##
      enabled: false
      ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
      ## Note: Make sure the user is included in auth.quorum.serverUsers
      ##
      learnerUser: ""
      ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers.
      ##
      learnerPassword: ""
      ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
      ## Specify them as a string, for example: "user1,user2,admin"
      ##
      serverUsers: ""
      ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created
      ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
      ##
      serverPasswords: ""
      ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords)
      ##
      existingSecret: ""
  ## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats
  ##
  tickTime: 2000
  ## @param initLimit ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader
  ##
  initLimit: 10
  ## @param syncLimit How far out of date a server can be from a leader
  ##
  syncLimit: 5
  ## @param preAllocSize Block size for transaction log file
  ##
  preAllocSize: 65536
  ## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled)
  ##
  snapCount: 100000
  ## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
  ##
  maxClientCnxns: 60
  ## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate
  ## Defaults to 20 times the tickTime
  ##
  maxSessionTimeout: 40000
  ## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms)
  ## This env var is ignored if Xmx an Xms are configured via `jvmFlags`
  ##
  heapSize: 1024
  ## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed
  ## (quoted so the comma-separated value is unambiguously one string)
  ##
  fourlwCommandsWhitelist: "srvr, mntr, ruok"
  ## @param minServerId Minimal SERVER_ID value, nodes increment their IDs respectively
  ## Servers increment their ID starting at this minimal value.
  ## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively.
  ##
  minServerId: 1
  ## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses
  ##
  listenOnAllIPs: false
  ## Ongoing data directory cleanup configuration
  ##
  autopurge:
    ## @param autopurge.snapRetainCount The most recent snapshots amount (and corresponding transaction logs) to retain
    ##
    snapRetainCount: 3
    ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered
    ## Set to a positive integer to enable the auto purging
    ##
    purgeInterval: 0
  ## @param logLevel Log level for the ZooKeeper server. ERROR by default
  ## Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs
  ##
  logLevel: ERROR
  ## @param jvmFlags Default JVM flags for the ZooKeeper process
  ##
  jvmFlags: ""
  ## @param dataLogDir Dedicated data log directory
  ## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
  ## E.g.
  ## dataLogDir: /bitnami/zookeeper/dataLog
  ##
  dataLogDir: ""
  ## Custom ZooKeeper configuration to be injected as-is.
  configuration: ""
  ## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper
  ## NOTE: When it's set the `configuration` parameter is ignored
  ##
  existingConfigmap: ""
  ## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes
  ## e.g:
  ## extraEnvVars:
  ##   - name: FOO
  ##     value: "bar"
  ##
  ## @param clusterDomain Kubernetes Cluster Domain
  ##
  clusterDomain: cluster.local
  ## @param extraDeploy Extra objects to deploy (evaluated as a template)
  ##
  extraDeploy: []
  ## Add labels to all the deployed resources.
  ## NOTE(review): key is `Labels` while the comment calls it commonLabels —
  ## key name preserved; confirm what the templates actually read.
  Labels: {}
  ## Add annotations to all the deployed resources (same caveat as `Labels`).
  Annotations: {}
  ## @param namespaceOverride Override namespace for ZooKeeper resources
  ## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent
  ##
  diagnosticMode:
    ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden)
    ##
    enabled: false
    ## @param diagnosticMode.command Command to override all containers in the statefulset
    ##
    command:
      - sleep
    ## @param diagnosticMode.args Args to override all containers in the statefulset
    ##
    args:
      - infinity
## Nacos Necessary configuration
nacos:
  name: nacos
  ## Deploy the bundled Nacos (true/false).
  enabled: true
  ## replicas
  replicas: 1
  mode: standalone
  # mode: cluster
  domainName: cluster.local
  ## Nacos server image.
  image:
    registry: docker.io
    ## e.g registry.k8s.io
    repository: nacos/nacos-server
    tag: latest
    pullPolicy: IfNotPresent
  ## Peer-finder plugin (used in cluster mode).
  plugin:
    enable: true
    image:
      repository: nacos/nacos-peer-finder-plugin
      ## Quoted: a bare 1.1 would be parsed as the float 1.1, not a tag string.
      tag: "1.1"
      pullPolicy: IfNotPresent
  ## Nacos service.
  service:
    type: NodePort
    port: 8848
    nodePort: 30000
  ## Nacos persistence.
  persistence:
    enabled: false
    data:
      accessModes:
        - ReadWriteOnce
      storageClassName: ""
      resources:
        requests:
          storage: 5Gi
  ## Nacos backing storage.
  storage:
    type: embedded
    # type: mysql
    # db:
    #   host: localhost
    #   name: nacos
    #   port: 3306
    #   username: username
    #   password: password
    #   param: characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false
## @param properties Dubbo-admin default application properties
properties:
  admin.registry.address: zookeeper://zookeeper:2181
  admin.config-center: zookeeper://zookeeper:2181
  admin.metadata-report.address: zookeeper://zookeeper:2181
  admin.root.user.name: root
  admin.root.user.password: root
  admin.check.sessionTimeoutMilli: 3600000
  server.compression.enabled: true
  server.compression.mime-types: text/css,text/javascript,application/javascript
  server.compression.min-response-size: 10240
  admin.check.tokenTimeoutMilli: 3600000
  admin.check.signSecret: 86295dd0c4ef69a1036b0b0c15158d77
  dubbo.application.name: dubbo-admin
  dubbo.registry.address: ${admin.registry.address}
  spring.datasource.url: jdbc:h2:mem:~/dubbo-admin;MODE=MYSQL;
  spring.datasource.username: sa
  ## Was a bare value (YAML null); an explicit empty string is what the
  ## properties file expects for a blank password.
  spring.datasource.password: ""
  mybatis-plus.global-config.db-config.id-type: none
  dubbo.application.logger: slf4j
  # nacos config, add parameters to url like username=nacos&password=nacos
  # admin.registry.address: nacos://nacos:8848?group=DEFAULT_GROUP&namespace=public
  # admin.config-center: nacos://nacos:8848?group=dubbo
  # admin.metadata-report.address: nacos://nacos:8848?group=dubbo
  # group (Deprecated it is recommended to use URL to add parameters,will be removed in the future)
  # admin.registry.group: dubbo
  # admin.config-center.group: dubbo
  # admin.metadata-report.group: dubbo
  # namespace used by nacos.(Deprecated it is recommended to use URL to add parameters,will be removed in the future)
  # admin.registry.namespace: public
  # admin.config-center.namespace: public
  # admin.metadata-report.namespace: public
  # apollo config
  # admin.config-center: apollo://localhost:8070?token=e16e5cd903fd0c97a116c873b448544b9d086de9&app.id=test&env=dev&cluster=default&namespace=dubbo
  # admin.apollo.token: e16e5cd903fd0c97a116c873b448544b9d086de9
  # admin.apollo.appId: test
  # admin.apollo.env: dev
  # admin.apollo.cluster: default
  # mysql
  # spring.datasource.driver-class-name: com.mysql.jdbc.Driver
  # spring.datasource.url: jdbc:mysql://localhost:3306/dubbo-admin?characterEncoding=utf8&connectTimeout=1000&socketTimeout=10000&autoReconnect=true
  # spring.datasource.username: root
  # spring.datasource.password: mysql