e2e: use Vault 1.12.1 instead of the outdated 1.2.3

The main reason for bumping Vault's version is because 1.2.3 is not
compatible with the config parameter `disable_iss_validation`, which is
needed for accommodating the future tests [1] that rely on bound tokens
and static tokens.

For context, Vault 1.2.3 was released on Sep 9, 2019 [2] but
`disable_iss_validation` was only added on July 21st, 2020 in Vault
1.5.0.

A breaking change happened in Vault 1.5.0 [3]: Vault started loading
the pod's own token instead of using the token being reviewed for
authenticating. An alternative solution could have been to
prevent the service account from being mounted to the pod, but I figured
that having the two service accounts separated is a better practice.

[1]: https://github.com/cert-manager/cert-manager/pull/5502
[2]: https://github.com/hashicorp/vault/commit/c14bd9a2
[3]: https://github.com/hashicorp/vault/blob/main/CHANGELOG.md#150

Signed-off-by: Maël Valais <mael@vls.dev>
This commit is contained in:
Maël Valais 2022-11-25 13:47:24 +01:00
parent d85e424cd0
commit f4f72c16e6
5 changed files with 60 additions and 50 deletions

View File

@ -15,7 +15,7 @@ K8S_VERSION := 1.24
IMAGE_ingressnginx_amd64 := k8s.gcr.io/ingress-nginx/controller:v1.1.0@sha256:7464dc90abfaa084204176bcc0728f182b0611849395787143f6854dc6c38c85
IMAGE_kyverno_amd64 := ghcr.io/kyverno/kyverno:v1.7.1@sha256:aec4b029660d47aea025336150fdc2822c991f592d5170d754b6acaf158b513e
IMAGE_kyvernopre_amd64 := ghcr.io/kyverno/kyvernopre:v1.7.1@sha256:1bcec6bc854720e22f439c6dcea02fcf689f31976babcf03a449d750c2b1f34a
IMAGE_vault_amd64 := index.docker.io/library/vault:1.2.3@sha256:b1c86c9e173f15bb4a926e4144a63f7779531c30554ac7aee9b2a408b22b2c01
IMAGE_vault_amd64 := index.docker.io/library/vault:1.12.1@sha256:08dd1cb922624c51a5aefd4d9ce0ac5ed9688d96d8a5ad94664fa10e84702ed6
IMAGE_bind_amd64 := docker.io/eafxx/bind:latest-9f74179f@sha256:0b8c766f5bedbcbe559c7970c8e923aa0c4ca771e62fcf8dba64ffab980c9a51
IMAGE_sampleexternalissuer_amd64 := ghcr.io/cert-manager/sample-external-issuer/controller:v0.1.1@sha256:7dafe98c73d229bbac08067fccf9b2884c63c8e1412fe18f9986f59232cf3cb5
IMAGE_projectcontour_amd64 := ghcr.io/projectcontour/contour:v1.22.0@sha256:c8ee1e566340c1bfd11fc9a1a90d758bde562faecb722540207084330b300497
@ -25,7 +25,7 @@ IMAGE_vaultretagged_amd64 := local/vault:local
IMAGE_ingressnginx_arm64 := k8s.gcr.io/ingress-nginx/controller:v1.1.0@sha256:86be28e506653cbe29214cb272d60e7c8841ddaf530da29aa22b1b1017faa956
IMAGE_kyverno_arm64 := ghcr.io/kyverno/kyverno:v1.7.1@sha256:4355f1f65ea5e952886e929a15628f0c6704905035b4741c6f560378871c9335
IMAGE_kyvernopre_arm64 := ghcr.io/kyverno/kyvernopre:v1.7.1@sha256:141234fb74242155c7b843180b90ee5fb6a20c9e77598bd9c138c687059cdafd
IMAGE_vault_arm64 := index.docker.io/library/vault:1.2.3@sha256:226a269b83c4b28ff8a512e76f1e7b707eccea012e4c3ab4c7af7fff1777ca2d
IMAGE_vault_arm64 := $(IMAGE_vault_amd64)
IMAGE_bind_arm64 := docker.io/eafxx/bind:latest-9f74179f@sha256:85de273f24762c0445035d36290a440e8c5a6a64e9ae6227d92e8b0b0dc7dd6d
IMAGE_sampleexternalissuer_arm64 := # 🚧 NOT AVAILABLE FOR arm64 🚧
IMAGE_projectcontour_arm64 := ghcr.io/projectcontour/contour:v1.22.0@sha256:ca37e86e284e72b3a969c7845a56a1cfcd348f4cb75bf6312d5b11067efdd667
@ -131,7 +131,7 @@ $(LOAD_TARGETS): load-%: % $(BINDIR)/scratch/kind-exists | $(NEEDS_KIND)
# We don't pull using both the digest and tag because crane replaces the
# tag with "i-was-a-digest". We still check that the downloaded image
# matches the digest.
$(call image-tar,kyverno) $(call image-tar,kyvernopre) $(call image-tar,bind) $(call image-tar,projectcontour) $(call image-tar,sampleexternalissuer) $(call image-tar,vault) $(call image-tar,ingressnginx): $(BINDIR)/downloaded/containers/$(CRI_ARCH)/%.tar: | $(NEEDS_CRANE)
$(call image-tar,kyverno) $(call image-tar,kyvernopre) $(call image-tar,bind) $(call image-tar,projectcontour) $(call image-tar,sampleexternalissuer) $(call image-tar,ingressnginx): $(BINDIR)/downloaded/containers/$(CRI_ARCH)/%.tar: | $(NEEDS_CRANE)
@$(eval IMAGE=$(subst +,:,$*))
@$(eval IMAGE_WITHOUT_DIGEST=$(shell cut -d@ -f1 <<<"$(IMAGE)"))
@$(eval DIGEST=$(subst $(IMAGE_WITHOUT_DIGEST)@,,$(IMAGE)))
@ -140,7 +140,7 @@ $(call image-tar,kyverno) $(call image-tar,kyvernopre) $(call image-tar,bind) $(
$(CRANE) pull $(IMAGE_WITHOUT_DIGEST) $@ --platform=linux/$(CRI_ARCH)
# Same as above, except it supports multiarch images.
$(call image-tar,kind): $(BINDIR)/downloaded/containers/$(CRI_ARCH)/%.tar: | $(NEEDS_CRANE)
$(call image-tar,kind) $(call image-tar,vault): $(BINDIR)/downloaded/containers/$(CRI_ARCH)/%.tar: | $(NEEDS_CRANE)
@$(eval IMAGE=$(subst +,:,$*))
@$(eval IMAGE_WITHOUT_DIGEST=$(shell cut -d@ -f1 <<<"$(IMAGE)"))
@$(eval DIGEST=$(subst $(IMAGE_WITHOUT_DIGEST)@,,$(IMAGE)))

View File

@ -142,7 +142,7 @@ func (v *VaultInitializer) Init() error {
v.KubernetesAuthPath = "kubernetes"
}
v.proxy = newProxy(v.Namespace, v.PodName, v.Kubectl, v.VaultCA)
v.proxy = newProxy(v.PodNS, v.PodName, v.Kubectl, v.VaultCA)
client, err := v.proxy.init()
if err != nil {
return err
@ -446,36 +446,40 @@ func (v *VaultInitializer) setupKubernetesBasedAuth() error {
return nil
}
// CreateKubernetesRole creates a service account and ClusterRoleBinding for Kubernetes auth delegation
func (v *VaultInitializer) CreateKubernetesRole(client kubernetes.Interface, namespace, roleName, serviceAccountName string) error {
serviceAccount := NewVaultServiceAccount(serviceAccountName)
_, err := client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), serviceAccount, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating ServiceAccount for Kubernetes auth: %s", err.Error())
}
role := NewVaultServiceAccountRole(namespace, serviceAccountName)
_, err = client.RbacV1().ClusterRoles().Create(context.TODO(), role, metav1.CreateOptions{})
// CreateKubernetesRole creates a service account and ClusterRoleBinding for
// Kubernetes auth delegation. The name "boundSA" refers to the Vault param
// "bound_service_account_names".
func (v *VaultInitializer) CreateKubernetesRole(client kubernetes.Interface, vaultRole, boundNS, boundSA string) error {
// Watch out, we refer to two different namespaces here:
// - v.PodNS = the pod's service account used by Vault's pod to
// authenticate with Kubernetes for the token review.
// - boundSA = the service account used to login using the Vault Kubernetes
// auth.
clusterRole := NewVaultServiceAccountRole(v.PodNS, v.PodSA)
_, err := client.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating Role for Kubernetes auth ServiceAccount: %s", err.Error())
}
roleBinding := NewVaultServiceAccountClusterRoleBinding(role.Name, namespace, serviceAccountName)
roleBinding := NewVaultServiceAccountClusterRoleBinding(clusterRole.Name, v.PodNS, v.PodSA)
_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), roleBinding, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating RoleBinding for Kubernetes auth ServiceAccount: %s", err.Error())
}
_, err = client.CoreV1().ServiceAccounts(boundNS).Create(context.TODO(), NewVaultServiceAccount(boundSA), metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("error creating ServiceAccount for Kubernetes auth: %s", err.Error())
}
// vault write auth/kubernetes/role/<roleName>
roleParams := map[string]string{
"bound_service_account_names": serviceAccountName,
"bound_service_account_namespaces": namespace,
"bound_service_account_names": boundSA,
"bound_service_account_namespaces": boundNS,
"policies": "[" + v.Role + "]",
}
url := path.Join(fmt.Sprintf("/v1/auth/%s/role", v.KubernetesAuthPath), roleName)
url := path.Join(fmt.Sprintf("/v1/auth/%s/role", v.KubernetesAuthPath), vaultRole)
_, err = v.proxy.callVault("POST", url, "", roleParams)
if err != nil {
return fmt.Errorf("error configuring kubernetes auth role: %s", err.Error())
@ -489,8 +493,8 @@ func (v *VaultInitializer) CreateKubernetesRole(client kubernetes.Interface, nam
"allowed_uri_sans": "spiffe://cluster.local/*",
"enforce_hostnames": "false",
"allow_bare_domains": "true",
"bound_service_account_names": serviceAccountName,
"bound_service_account_namespaces": namespace,
"bound_service_account_names": boundSA,
"bound_service_account_namespaces": boundNS,
}
url = path.Join("/v1", v.IntermediateMount, "roles", v.Role)
@ -511,8 +515,8 @@ func (v *VaultInitializer) CreateKubernetesRole(client kubernetes.Interface, nam
params = map[string]string{
"period": "24h",
"policies": v.Role,
"bound_service_account_names": serviceAccountName,
"bound_service_account_namespaces": namespace,
"bound_service_account_names": boundSA,
"bound_service_account_namespaces": boundNS,
}
baseUrl := path.Join("/v1", "auth", v.KubernetesAuthPath, "role", v.Role)
@ -525,21 +529,21 @@ func (v *VaultInitializer) CreateKubernetesRole(client kubernetes.Interface, nam
}
// CleanKubernetesRole cleans up the ClusterRoleBinding and ServiceAccount for Kubernetes auth delegation
func (v *VaultInitializer) CleanKubernetesRole(client kubernetes.Interface, namespace, roleName, serviceAccountName string) error {
if err := client.RbacV1().RoleBindings(namespace).Delete(context.TODO(), roleName, metav1.DeleteOptions{}); err != nil {
func (v *VaultInitializer) CleanKubernetesRole(client kubernetes.Interface, vaultRole, boundNS, boundSA string) error {
clusterRole := NewVaultServiceAccountRole(v.PodNS, v.PodSA) // Just for getting the name.
if err := client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{}); err != nil {
return err
}
if err := client.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{}); err != nil {
return err
}
if err := client.RbacV1().Roles(namespace).Delete(context.TODO(), roleName, metav1.DeleteOptions{}); err != nil {
return err
}
if err := client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccountName, metav1.DeleteOptions{}); err != nil {
if err := client.CoreV1().ServiceAccounts(boundNS).Delete(context.TODO(), boundSA, metav1.DeleteOptions{}); err != nil {
return err
}
// vault delete auth/kubernetes/role/<roleName>
url := path.Join(fmt.Sprintf("/v1/auth/%s/role", v.KubernetesAuthPath), roleName)
url := path.Join(fmt.Sprintf("/v1/auth/%s/role", v.KubernetesAuthPath), vaultRole)
_, err := v.proxy.callVault("DELETE", url, "", nil)
if err != nil {
return fmt.Errorf("error cleaning up kubernetes auth role: %s", err.Error())

View File

@ -40,9 +40,9 @@ import (
const (
vaultHelmChartRepo = "https://helm.releases.hashicorp.com"
vaultHelmChartVersion = "0.22.0"
vaultHelmChartVersion = "0.22.1"
vaultImageRepository = "index.docker.io/library/vault"
vaultImageTag = "1.2.3@sha256:b1c86c9e173f15bb4a926e4144a63f7779531c30554ac7aee9b2a408b22b2c01"
vaultImageTag = "1.12.1@sha256:08dd1cb922624c51a5aefd4d9ce0ac5ed9688d96d8a5ad94664fa10e84702ed6"
)
// Vault describes the configuration details for an instance of Vault
@ -72,8 +72,11 @@ type Details struct {
// PodName is the name of the Vault pod
PodName string
// Namespace is the namespace vault has been deployed into
Namespace string
// PodNS is the namespace that the Vault pod is deployed into.
PodNS string
// PodSA is the service account that gets auto-mounted in the Vault pod.
PodSA string
// VaultCA is the CA used to sign the vault serving certificate
VaultCA []byte
@ -273,10 +276,12 @@ func (v *Vault) Provision() error {
continue
}
v.details.PodName = vaultPod.Name
v.details.PodNS = vaultPod.Namespace
v.details.PodSA = vaultPod.Spec.ServiceAccountName
break
}
v.details.Namespace = v.Namespace
v.details.Host = fmt.Sprintf("https://%s:8200", "chart-vault-"+v.Name+"."+v.Namespace)
return nil

View File

@ -67,7 +67,7 @@ var _ = framework.ConformanceDescribe("CertificateSigningRequests", func() {
type kubernetes struct {
testWithRootCA bool
role string
vaultRole string
addon *vault.Vault
initializer *vault.VaultInitializer
@ -120,7 +120,7 @@ func (k *kubernetes) delete(f *framework.Framework, signerName string) {
err := f.CertManagerClientSet.CertmanagerV1().ClusterIssuers().Delete(context.TODO(), ref.Name, metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
k.initializer.CleanKubernetesRole(f.KubeClientSet, f.Config.Addons.CertManager.ClusterResourceNamespace, k.role, k.role)
k.initializer.CleanKubernetesRole(f.KubeClientSet, k.vaultRole, f.Config.Addons.CertManager.ClusterResourceNamespace, k.vaultRole)
}
Expect(k.initializer.Clean()).NotTo(HaveOccurred(), "failed to deprovision vault initializer")
@ -128,7 +128,7 @@ func (k *kubernetes) delete(f *framework.Framework, signerName string) {
}
func (k *kubernetes) initVault(f *framework.Framework, ns string) {
func (k *kubernetes) initVault(f *framework.Framework, boundNS string) {
By("Configuring the Vault server")
k.addon = &vault.Vault{
Base: addon.Base,
@ -136,7 +136,7 @@ func (k *kubernetes) initVault(f *framework.Framework, ns string) {
Namespace: f.Namespace.Name,
}
k.role = "vault-issuer-" + util.RandStringRunes(5)
k.vaultRole = "vault-issuer-" + util.RandStringRunes(5)
Expect(k.addon.Setup(f.Config)).NotTo(HaveOccurred(), "failed to setup vault")
Expect(k.addon.Provision()).NotTo(HaveOccurred(), "failed to provision vault")
@ -153,7 +153,7 @@ func (k *kubernetes) initVault(f *framework.Framework, ns string) {
IntermediateMount: intermediateMount,
ConfigureWithRoot: k.testWithRootCA,
KubernetesAuthPath: "kubernetes",
Role: k.role,
Role: k.vaultRole,
APIServerURL: apiHost,
APIServerCA: caCert,
}
@ -161,16 +161,17 @@ func (k *kubernetes) initVault(f *framework.Framework, ns string) {
Expect(k.initializer.Setup()).NotTo(HaveOccurred(), "failed to setup vault")
By("Creating a ServiceAccount for Vault authentication")
err := k.initializer.CreateKubernetesRole(f.KubeClientSet, ns, k.role, k.role)
boundSA := k.vaultRole
err := k.initializer.CreateKubernetesRole(f.KubeClientSet, k.vaultRole, boundNS, boundSA)
Expect(err).NotTo(HaveOccurred())
_, err = f.KubeClientSet.CoreV1().Secrets(ns).Create(context.TODO(), vault.NewVaultKubernetesSecret(k.role, k.role), metav1.CreateOptions{})
_, err = f.KubeClientSet.CoreV1().Secrets(boundNS).Create(context.TODO(), vault.NewVaultKubernetesSecret(k.vaultRole, k.vaultRole), metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
_, _, err = k.initializer.CreateAppRole()
Expect(err).NotTo(HaveOccurred())
}
func (k *kubernetes) issuerSpec(f *framework.Framework) cmapi.IssuerSpec {
vaultPath := path.Join(intermediateMount, "sign", k.role)
vaultPath := path.Join(intermediateMount, "sign", k.vaultRole)
return cmapi.IssuerSpec{
IssuerConfig: cmapi.IssuerConfig{
@ -181,10 +182,10 @@ func (k *kubernetes) issuerSpec(f *framework.Framework) cmapi.IssuerSpec {
Auth: cmapi.VaultAuth{
Kubernetes: &cmapi.VaultKubernetesAuth{
Path: "/v1/auth/kubernetes",
Role: k.role,
Role: k.vaultRole,
SecretRef: cmmeta.SecretKeySelector{
LocalObjectReference: cmmeta.LocalObjectReference{
Name: k.role,
Name: k.vaultRole,
},
},
},

View File

@ -93,7 +93,7 @@ var _ = framework.CertManagerDescribe("Vault Issuer", func() {
Expect(err).NotTo(HaveOccurred())
By("creating a service account for Vault authentication")
err = vaultInit.CreateKubernetesRole(f.KubeClientSet, f.Namespace.Name, vaultKubernetesRoleName, vaultSecretServiceAccount)
err = vaultInit.CreateKubernetesRole(f.KubeClientSet, vaultKubernetesRoleName, f.Namespace.Name, vaultSecretServiceAccount)
Expect(err).NotTo(HaveOccurred())
})
@ -104,7 +104,7 @@ var _ = framework.CertManagerDescribe("Vault Issuer", func() {
vaultInit.CleanAppRole()
By("Cleaning up Kubernetes")
vaultInit.CleanKubernetesRole(f.KubeClientSet, f.Namespace.Name, vaultKubernetesRoleName, vaultSecretServiceAccount)
vaultInit.CleanKubernetesRole(f.KubeClientSet, vaultKubernetesRoleName, f.Namespace.Name, vaultSecretServiceAccount)
By("Cleaning up Vault")
Expect(vaultInit.Clean()).NotTo(HaveOccurred())