|
- ## Configure resource requests and limits
- ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
- ##
- image:
- repository: redis
- tag: 5.0.6-alpine
- pullPolicy: IfNotPresent
-
- ## Reference to one or more secrets to be used when pulling images
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ## This imagePullSecrets is only for redis images
- ##
- imagePullSecrets: []
- # - name: "image-pull-secret"
-
- ## replicas number for each component
- replicas: 1
-
- ## Kubernetes priorityClass name for the redis-ha-server pod
- # priorityClassName: ""
-
- ## Custom labels for the redis pod
- labels: {}
-
- ## Pods Service Account
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
- serviceAccount:
- ## Specifies whether a ServiceAccount should be created
- ##
- create: true
- ## The name of the ServiceAccount to use.
- ## If not set and create is true, a name is generated using the redis-ha.fullname template
- # name:
-
- ## Enables a HA Proxy for better LoadBalancing / Sentinel Master support. Automatically proxies to Redis master.
- ## Recommended for externally exposed Redis clusters.
- ## ref: https://cbonte.github.io/haproxy-dconv/1.9/intro.html
- haproxy:
- enabled: false
- # Enable if you want a dedicated port in haproxy for redis-slaves
- readOnly:
- enabled: false
- port: 6380
- replicas: 3
- image:
- repository: haproxy
- tag: 2.0.4
- pullPolicy: IfNotPresent
-
- ## Reference to one or more secrets to be used when pulling images
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- ##
- imagePullSecrets: []
- # - name: "image-pull-secret"
-
- annotations: {}
- resources: {}
- emptyDir: {}
- ## Enable sticky sessions to Redis nodes via HAProxy
- ## Very useful for long-living connections as in case of Sentry for example
- stickyBalancing: false
- ## Kubernetes priorityClass name for the haproxy pod
- # priorityClassName: ""
- ## Service type for HAProxy
- ##
- service:
- type: ClusterIP
- loadBalancerIP:
- annotations: {}
- serviceAccount:
- create: true
- ## Official HAProxy embedded prometheus metrics settings.
- ## Ref: https://github.com/haproxy/haproxy/tree/master/contrib/prometheus-exporter
- ##
- metrics:
- enabled: false
- # prometheus port & scrape path
- port: 9101
- portName: exporter-port
- scrapePath: /metrics
-
- serviceMonitor:
- # When set true then use a ServiceMonitor to configure scraping
- enabled: false
- # Set the namespace the ServiceMonitor should be deployed
- # namespace: monitoring
- # Set how frequently Prometheus should scrape
- # interval: 30s
- # Set path to redis-exporter telemetry-path
- # telemetryPath: /metrics
- # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
- # labels: {}
- # Set timeout for scrape
- # timeout: 10s
- init:
- resources: {}
- timeout:
- connect: 4s
- server: 30s
- client: 30s
- check: 2s
- securityContext:
- runAsUser: 1000
- fsGroup: 1000
- runAsNonRoot: true
-
- ## Whether the haproxy pods should be forced to run on separate nodes.
- hardAntiAffinity: false
-
- ## Additional affinities to add to the haproxy pods.
- additionalAffinities: {}
-
- ## Override all other affinity settings for the haproxy pods with a string.
- affinity: |
-
- ## Custom config-haproxy.cfg files used to override default settings. If this file is
- ## specified then the config-haproxy.cfg above will be ignored.
- # customConfig: |-
- # Define configuration here
- ## Place any additional configuration section to add to the default config-haproxy.cfg
- # extraConfig: |-
- # Define configuration here
-
-
- ## Role Based Access
- ## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
- ##
- rbac:
- create: true
-
- sysctlImage:
- enabled: false
- command: []
- registry: docker.io
- repository: busybox
- tag: 1.28
- pullPolicy: Always
- mountHostSys: false
- resources: {}
-
- ## Use an alternate scheduler, e.g. "stork".
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
- ##
- # schedulerName:
-
- ## Redis specific configuration options
- redis:
- port: 6379
- masterGroupName: "mymaster" # must match ^[\\w-\\.]+$ and can be templated
- config:
- ## Additional redis conf options can be added below
- ## For all available options see http://download.redis.io/redis-stable/redis.conf
- min-replicas-to-write: 0
- min-replicas-max-lag: 5 # Value in seconds
- maxmemory: "0" # Max memory to use for each redis instance. Default is unlimited.
- maxmemory-policy: "volatile-lru" # Max memory policy to use for each redis instance. Default is volatile-lru.
- # Determines if scheduled RDB backups are created. Default is false.
- # Please note that local (on-disk) RDBs will still be created when re-syncing with a new slave. The only way to prevent this is to enable diskless replication.
- save: "900 1"
- # When enabled, directly sends the RDB over the wire to slaves, without using the disk as intermediate storage. Default is false.
- repl-diskless-sync: "yes"
- rdbcompression: "yes"
- rdbchecksum: "yes"
-
-
- ## Custom redis.conf files used to override default settings. If this file is
- ## specified then the redis.config above will be ignored.
- # customConfig: |-
- # Define configuration here
-
- resources: {}
- # requests:
- # memory: 200Mi
- # cpu: 100m
- # limits:
- # memory: 700Mi
-
- ## Sentinel specific configuration options
- sentinel:
- port: 26379
- quorum: 2
- config:
- ## Additional sentinel conf options can be added below. Only options that
- ## are expressed in the format similar to 'sentinel xxx mymaster xxx' will
- ## be properly templated except the maxclients option.
- ## For available options see http://download.redis.io/redis-stable/sentinel.conf
- down-after-milliseconds: 10000
- ## Failover timeout value in milliseconds
- failover-timeout: 180000
- parallel-syncs: 5
- maxclients: 10000
-
- ## Custom sentinel.conf files used to override default settings. If this file is
- ## specified then the sentinel.config above will be ignored.
- # customConfig: |-
- # Define configuration here
-
- resources: {}
- # requests:
- # memory: 200Mi
- # cpu: 100m
- # limits:
- # memory: 200Mi
-
- securityContext:
- runAsUser: 1000
- fsGroup: 1000
- runAsNonRoot: true
-
- ## Node labels, affinity, and tolerations for pod assignment
- ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
- ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
- ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- nodeSelector: {}
-
- ## Whether the Redis server pods should be forced to run on separate nodes.
- ## This is accomplished by setting their AntiAffinity with requiredDuringSchedulingIgnoredDuringExecution as opposed to preferred.
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature
- ##
- hardAntiAffinity: false
-
- ## Additional affinities to add to the Redis server pods.
- ##
- ## Example:
- ## nodeAffinity:
- ## preferredDuringSchedulingIgnoredDuringExecution:
- ## - weight: 50
- ## preference:
- ## matchExpressions:
- ## - key: spot
- ## operator: NotIn
- ## values:
- ## - "true"
- ##
- ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- ##
- additionalAffinities: {}
-
- ## Override all other affinity settings for the Redis server pods with a string.
- ##
- ## Example:
- ## affinity: |
- ## podAntiAffinity:
- ## requiredDuringSchedulingIgnoredDuringExecution:
- ## - labelSelector:
- ## matchLabels:
- ## app: {{ template "redis-ha.name" . }}
- ## release: {{ .Release.Name }}
- ## topologyKey: kubernetes.io/hostname
- ## preferredDuringSchedulingIgnoredDuringExecution:
- ## - weight: 100
- ## podAffinityTerm:
- ## labelSelector:
- ## matchLabels:
- ## app: {{ template "redis-ha.name" . }}
- ## release: {{ .Release.Name }}
- ## topologyKey: failure-domain.beta.kubernetes.io/zone
- ##
- affinity: |
-
- # Prometheus exporter specific configuration options
- exporter:
- enabled: false
- image: oliver006/redis_exporter
- tag: v1.3.2
- pullPolicy: IfNotPresent
-
- # prometheus port & scrape path
- port: 9121
- scrapePath: /metrics
-
- # cpu/memory resource limits/requests
- resources: {}
-
- # Additional args for redis exporter
- extraArgs: {}
-
- # Used to mount a LUA-Script via config map and use it for metrics-collection
- # script: |
- # -- Example script copied from: https://github.com/oliver006/redis_exporter/blob/master/contrib/sample_collect_script.lua
- # -- Example collect script for -script option
- # -- This returns a Lua table with alternating keys and values.
- # -- Both keys and values must be strings, similar to a HGETALL result.
- # -- More info about Redis Lua scripting: https://redis.io/commands/eval
- #
- # local result = {}
- #
- # -- Add all keys and values from some hash in db 5
- # redis.call("SELECT", 5)
- # local r = redis.call("HGETALL", "some-hash-with-stats")
- # if r ~= nil then
- # for _,v in ipairs(r) do
- # table.insert(result, v) -- alternating keys and values
- # end
- # end
- #
- # -- Set foo to 42
- # table.insert(result, "foo")
- # table.insert(result, "42") -- note the string, use tostring() if needed
- #
- # return result
-
- serviceMonitor:
- # When set true then use a ServiceMonitor to configure scraping
- enabled: false
- # Set the namespace the ServiceMonitor should be deployed
- # namespace: monitoring
- # Set how frequently Prometheus should scrape
- # interval: 30s
- # Set path to redis-exporter telemetry-path
- # telemetryPath: /metrics
- # Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
- # labels: {}
- # Set timeout for scrape
- # timeout: 10s
-
- podDisruptionBudget: {}
- # maxUnavailable: 1
- # minAvailable: 1
-
- ## Configures redis with AUTH (requirepass & masterauth conf params)
- auth: false
- # redisPassword:
-
- ## Use existing secret containing key `authKey` (ignores redisPassword)
- # existingSecret:
-
- ## Defines the key holding the redis password in existing secret.
- authKey: auth
-
- persistentVolume:
- enabled: true
- ## redis-ha data Persistent Volume Storage Class
- ## If defined, storageClassName: <storageClass>
- ## If set to "-", storageClassName: "", which disables dynamic provisioning
- ## If undefined (the default) or set to null, no storageClassName spec is
- ## set, choosing the default provisioner. (gp2 on AWS, standard on
- ## GKE, AWS & OpenStack)
- ##
- storageClass: "nfs-client"
- accessModes:
- - ReadWriteOnce
- size: 2Gi
- annotations: {}
- # reclaimPolicy per https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
- reclaimPolicy: ""
- init:
- resources: {}
-
- # To use a hostPath for data, set persistentVolume.enabled to false
- # and define hostPath.path.
- # Warning: this might overwrite existing folders on the host system!
- hostPath:
- ## path is evaluated as template so placeholders are replaced
- # path: "/data/{{ .Release.Name }}"
-
- # if chown is true, an init-container with root permissions is launched to
- # change the owner of the hostPath folder to the user defined in the
- # security context
- chown: true
-
- emptyDir: {}
|