# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1

strategy: {}
# type: RollingUpdate
# rollingUpdate:
#   maxSurge: 0
#   maxUnavailable: 1

image:
  repository: quay.io/jetstack/cert-manager-controller
  tag: canary
  pullPolicy: Always

# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""

leaderElection:
  # Override the namespace used to store the ConfigMap for leader election
  namespace: ""

rbac:
  # Specifies whether RBAC resources should be created
  create: true

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

# Optional additional arguments
extraArgs: []
# Use this flag to set a namespace that cert-manager will use to store
# supporting resources required for each ClusterIssuer (default is kube-system)
# - --cluster-resource-namespace=kube-system

extraEnv: []
# - name: SOME_VAR
#   value: 'some value'

resources: {}
# requests:
#   cpu: 10m
#   memory: 32Mi

podAnnotations: {}

podLabels: {}

# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
#   nameservers:
#     - "1.1.1.1"
#     - "8.8.8.8"

nodeSelector: {}

ingressShim: {}
# defaultIssuerName: ""
# defaultIssuerKind: ""
# defaultACMEChallengeType: ""
# defaultACMEDNS01ChallengeProvider: ""

webhook:
  enabled: true

# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "http://proxy:8080"
# no_proxy: 127.0.0.1,localhost

# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#           - matchExpressions:
#               - key: foo.bar.com/role
#                 operator: In
#                 values:
#                   - master
affinity: {}

# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
#   tolerations:
#     - key: foo.bar.com/role
#       operator: Equal
#       value: master
#       effect: NoSchedule
tolerations: []