Use k3d instead of kind

Ben Banfield-Zanin 2025-11-14 16:14:52 +00:00
parent 61841f4e72
commit 1c529b9187
20 changed files with 200 additions and 262 deletions

View file

@@ -79,12 +79,11 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- uses: engineerd/setup-kind@ecfad61750951586a9ef973db567df1d28671bdc # v0.6.2
- name: Install k3d with asdf
uses: asdf-vm/actions/install@1902764435ca0dd2f3388eea723a4f92a4eb8302 # v4
with:
version: "v0.29.0"
name: "ess-helm"
skipClusterCreation: "true"
skipClusterLogsExport: "true"
tool_versions: |
k3d 5.8.3
- uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
@@ -124,17 +123,24 @@ jobs:
if: ${{ failure() }}
shell: bash
run: |
kind export logs --name ess-helm ./ess-helm-logs
kind export kubeconfig --name ess-helm
ns=$(kubectl --context kind-ess-helm get ns -l app.kubernetes.io/managed-by=pytest -o jsonpath='{.items[].metadata.name}')
mkdir ess-helm-logs
k3d kubeconfig merge ess-helm -ds
for ns in $(kubectl --context k3d-ess-helm get ns -o custom-columns=NS:.metadata.name --no-headers); do
mkdir -p "./ess-helm-logs/$ns"
for pod in $(kubectl --context k3d-ess-helm -n "$ns" get pod -o custom-columns=NS:.metadata.name --no-headers); do
kubectl --context k3d-ess-helm -n "$ns" logs --all-containers --prefix --timestamps --ignore-errors --previous "$pod" > "./ess-helm-logs/$ns/$pod.previous"
kubectl --context k3d-ess-helm -n "$ns" logs --all-containers --prefix --timestamps --ignore-errors "$pod" > "./ess-helm-logs/$ns/$pod.logs"
done
done
ess_ns=$(kubectl --context k3d-ess-helm get ns -l app.kubernetes.io/managed-by=pytest -o jsonpath='{.items[].metadata.name}')
resources=("pods" "deployments" "statefulsets" "services" "configmaps" "ingresses" "persistentvolumes" "persistentvolumeclaims" "endpoints")
for i in "${resources[@]}"; do
kubectl --context kind-ess-helm get "$i" -n "$ns" > "./ess-helm-logs/$i.txt"
kubectl --context k3d-ess-helm get "$i" -n "$ess_ns" > "./ess-helm-logs/$i.txt"
echo "----" >> "./ess-helm-logs/$i.txt"
kubectl --context kind-ess-helm get "$i" -o yaml -n "$ns" >> "./ess-helm-logs/$i.txt"
kubectl --context k3d-ess-helm get "$i" -o yaml -n "$ess_ns" >> "./ess-helm-logs/$i.txt"
done
kubectl --context kind-ess-helm get events --sort-by=.metadata.creationTimestamp -n "$ns" > ./ess-helm-logs/events.txt
kind delete cluster --name ess-helm
kubectl --context k3d-ess-helm get events --sort-by=.metadata.creationTimestamp -n "$ess_ns" > ./ess-helm-logs/events.txt
k3d cluster delete ess-helm
- name: Upload logs
if: ${{ failure() }}

View file

@@ -16,6 +16,7 @@ Requirements for development:
Optional Tools:
* [`chart-testing`](https://github.com/helm/chart-testing) for Helm linting
* [`k3d`](https://k3d.io/stable/) for running the test cluster
* [`kubeconform`](https://github.com/yannh/kubeconform) for Kubernetes manifest validation
* [`shellcheck`](https://www.shellcheck.net/)
* Managed via Poetry and so should be available after `poetry install`
@@ -159,10 +160,10 @@ run `pytest test --cache-clear`.
#### Special env variables
- `PYTEST_KEEP_CLUSTER=1` : Do not destroy the cluster at the end of the test run.
You must delete it using `kind delete cluster --name ess-helm` manually before running any other test run.
You must delete it using `k3d cluster delete ess-helm` manually before running any other test run.
#### Usage
Use `kind export kubeconfig --name ess-helm` to get access to the cluster.
Use `k3d kubeconfig merge ess-helm -ds` to get access to the cluster.
The tests will use the cluster constructed by `scripts/setup_test_cluster.sh` if that is
running. If the tests use an existing cluster, they won't destroy the cluster afterwards.
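Taken together, a typical local debugging loop with these commands might look like the sketch below; the pytest invocation, flags and the k3d-ess-helm context name are taken from the docs above, everything else is illustrative:

PYTEST_KEEP_CLUSTER=1 pytest test           # run the suite and keep the k3d cluster afterwards
k3d kubeconfig merge ess-helm -ds           # merge into the default kubeconfig and switch context
kubectl --context k3d-ess-helm get pods -A  # inspect the kept cluster
k3d cluster delete ess-helm                 # required before the next test run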

View file

@@ -3,9 +3,6 @@
#
# SPDX-License-Identifier: AGPL-3.0-only
ingress:
controllerType: ingress-nginx
wellKnownDelegation:
ingress:
tlsSecret: "{{ $.Release.Name }}-well-known-web-tls"

View file

@@ -16,8 +16,6 @@ haproxy:
podSecurityContext:
runAsGroup: 0
replicas: 2
ingress:
controllerType: ingress-nginx
initSecrets:
annotations:
has-no-service-monitor: "true"

View file

@@ -18,8 +18,6 @@ global:
haproxy:
podSecurityContext:
runAsGroup: 0
ingress:
controllerType: ingress-nginx
matrixAuthenticationService:
enabled: false
matrixRTC:

View file

@@ -10,9 +10,6 @@
certManager:
clusterIssuer: ess-selfsigned
ingress:
controllerType: ingress-nginx
matrixRTC:
# Because the authoriser service won't trust certificates issued by the above self-signed CA
extraEnv:
@@ -24,4 +21,4 @@ matrixRTC:
- ess.localhost
- mrtc.ess.localhost
- synapse.ess.localhost
ip: '{{ ( (lookup "v1" "Service" "ingress-nginx" "ingress-nginx-controller") | default (dict "spec" (dict "clusterIP" "127.0.0.1")) ).spec.clusterIP }}'
ip: '{{ ( (lookup "v1" "Service" "kube-system" "traefik") | default (dict "spec" (dict "clusterIP" "127.0.0.1")) ).spec.clusterIP }}'
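The template now looks up the Traefik Service that k3s ships in kube-system and falls back to 127.0.0.1 when it does not exist yet. As a rough illustration only, the lookup resolves to something like this kubectl query:

kubectl --context k3d-ess-helm -n kube-system get service traefik \
  -o jsonpath='{.spec.clusterIP}' 2>/dev/null || echo 127.0.0.1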

View file

@@ -0,0 +1 @@
CI: switch from kind to k3d for integration tests.

poetry.lock (generated)
View file

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand.
[[package]]
name = "aiodns"
@@ -1407,6 +1407,21 @@ http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "httpx-retries"
version = "0.4.5"
description = "A retry layer for HTTPX."
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "httpx_retries-0.4.5-py3-none-any.whl", hash = "sha256:ae22d6ef197a2da49242246a01d721474cbd6516b1fef155f6da694ee410bb37"},
{file = "httpx_retries-0.4.5.tar.gz", hash = "sha256:acee306d7384eefad71ac12fefe8b13d7b41c19595c538e68d9bd7e40e59539d"},
]
[package.dependencies]
httpx = ">=0.20.0"
[[package]]
name = "hyperframe"
version = "6.1.0"
@@ -2625,14 +2640,14 @@ files = [
[[package]]
name = "pydantic"
version = "2.12.4"
version = "2.12.5"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
{file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"},
{file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"},
{file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"},
{file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"},
]
[package.dependencies]
@@ -3035,6 +3050,13 @@ optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"},
{file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"},
{file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"},
{file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"},
{file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"},
{file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"},
{file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"},
{file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"},
@@ -3105,14 +3127,14 @@ files = [
[[package]]
name = "rdflib"
version = "7.4.0"
version = "7.5.0"
description = "RDFLib is a Python library for working with RDF, a simple yet powerful language for representing information."
optional = false
python-versions = ">=3.8.1"
groups = ["dev"]
files = [
{file = "rdflib-7.4.0-py3-none-any.whl", hash = "sha256:0af003470404ff21bc0eb04077cc97ee96da581f2429bf42a8e163fc1c2797bc"},
{file = "rdflib-7.4.0.tar.gz", hash = "sha256:c8ee16c31848c19c174aed96185327ea139ca3d392fac7fa882ddf5687f8f533"},
{file = "rdflib-7.5.0-py3-none-any.whl", hash = "sha256:b011dfc40d0fc8a44252e906dcd8fc806a7859bc231be190c37e9568a31ac572"},
{file = "rdflib-7.5.0.tar.gz", hash = "sha256:663083443908b1830e567350d72e74d9948b310f827966358d76eebdc92bf592"},
]
[package.dependencies]
@@ -3124,6 +3146,7 @@ html = ["html5rdf (>=1.2,<2)"]
lxml = ["lxml (>=4.3,<6.0)"]
networkx = ["networkx (>=2,<4)"]
orjson = ["orjson (>=3.9.14,<4)"]
rdf4j = ["httpx (>=0.28.1,<0.29.0)"]
[[package]]
name = "referencing"
@@ -4228,4 +4251,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.12,<4.0"
content-hash = "f72508bff3762d1effecd30601cf33e9d2325435f71b070ce9274a730974ef03"
content-hash = "71fdf35ede388f44f52caa847f3e3757e907df3ea9571659ceca2fede5db7557"

View file

@@ -42,6 +42,7 @@ types-pyyaml = "^6.0.12.20250915"
semver = "^3.0.4"
prometheus-client = "^0.23.1"
yamllint = "^1.37.1"
httpx-retries = "^0.4.5"
[build-system]
requires = ["poetry-core>=2.1.0"]

View file

@@ -7,17 +7,10 @@
set -e
kind_cluster_name="ess-helm"
k3d_cluster_name="ess-helm"
if kind get clusters 2> /dev/null| grep "$kind_cluster_name"; then
kind delete cluster --name $kind_cluster_name
if k3d cluster list 2> /dev/null | grep "$k3d_cluster_name"; then
k3d cluster delete $k3d_cluster_name
else
echo "Kind cluster ${kind_cluster_name} already destoryed"
fi
if docker ps -a | grep "${kind_cluster_name}-registry"; then
docker stop "${kind_cluster_name}-registry" || true
docker rm "${kind_cluster_name}-registry" || true
else
echo "Kind cluster's local registry already destroyed"
echo "k3d cluster ${k3d_cluster_name} already destoryed"
fi

View file

@@ -7,8 +7,8 @@
set -e
kind_cluster_name="ess-helm"
kind_context_name="kind-$kind_cluster_name"
k3d_cluster_name="ess-helm"
k3d_context_name="k3d-$k3d_cluster_name"
# Space separated list of namespaces to use
ess_namespaces=${ESS_NAMESPACES:-ess}
@@ -16,43 +16,25 @@ root_folder="$(git rev-parse --show-toplevel)"
ca_folder="$root_folder/.ca"
mkdir -p "$ca_folder"
if docker ps -a | grep "${kind_cluster_name}-registry"; then
docker stop "${kind_cluster_name}-registry" || true
docker rm "${kind_cluster_name}-registry" || true
fi
if kind get clusters 2>/dev/null | grep "$kind_cluster_name"; then
echo "Cluster '$kind_cluster_name' is already provisioned by Kind"
if k3d cluster list 2>/dev/null | grep "$k3d_cluster_name"; then
echo "Cluster '$k3d_cluster_name' is already provisioned by k3d"
else
echo "Creating new Kind cluster '$kind_cluster_name'"
(cd "$root_folder/tests/integration/fixtures/files/clusters"; kind create cluster --name "$kind_cluster_name" --config "kind.yml")
echo "Creating new k3d cluster '$k3d_cluster_name'"
k3d cluster create "$k3d_cluster_name" --config "tests/integration/fixtures/files/clusters/k3d.yml"
fi
network=$(docker inspect $kind_cluster_name-control-plane | jq '.[0].NetworkSettings.Networks | keys | .[0]' -r)
docker run \
-d --restart=always -p "127.0.0.1:5000:5000" --network "$network" --network-alias "registry" --name "${kind_cluster_name}-registry" \
registry:2
helm --kube-context $kind_context_name upgrade -i ingress-nginx --repo https://kubernetes.github.io/ingress-nginx ingress-nginx \
--namespace ingress-nginx \
--create-namespace \
-f "$root_folder/tests/integration/fixtures/files/charts/ingress-nginx.yml"
helm --kube-context $kind_context_name upgrade -i metrics-server --repo https://kubernetes-sigs.github.io/metrics-server metrics-server \
--namespace kube-system \
-f "$root_folder/tests/integration/fixtures/files/charts/metrics-server.yml"
helm --kube-context $kind_context_name upgrade -i prometheus-operator-crds --repo https://prometheus-community.github.io/helm-charts prometheus-operator-crds \
helm --kube-context $k3d_context_name upgrade -i prometheus-operator-crds --repo https://prometheus-community.github.io/helm-charts prometheus-operator-crds \
--namespace prometheus-operator \
--create-namespace
helm --kube-context $kind_context_name upgrade -i cert-manager --repo https://charts.jetstack.io cert-manager \
helm --kube-context $k3d_context_name upgrade -i cert-manager --repo https://charts.jetstack.io cert-manager \
--namespace cert-manager \
--create-namespace \
-f "$root_folder/tests/integration/fixtures/files/charts/cert-manager.yml"
# Create a new CA certificate
if [[ ! -f "$ca_folder"/ca.crt || ! -f "$ca_folder"/ca.pem ]]; then
cat <<EOF | kubectl --context $kind_context_name apply -f -
cat <<EOF | kubectl --context $k3d_context_name apply -f -
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
@@ -80,19 +62,19 @@ spec:
group: cert-manager.io
---
EOF
kubectl --context $kind_context_name -n cert-manager wait --for condition=Ready Certificate/ess-ca
kubectl --context $k3d_context_name -n cert-manager wait --for condition=Ready Certificate/ess-ca
else
kubectl --context $kind_context_name delete ClusterIssuer ess-ca 2>/dev/null || true
kubectl --context $kind_context_name -n cert-manager delete Certificate ess-ca 2>/dev/null || true
kubectl --context $kind_context_name -n cert-manager delete Secret ess-ca 2>/dev/null || true
kubectl --context $kind_context_name -n cert-manager create secret generic ess-ca \
kubectl --context $k3d_context_name delete ClusterIssuer ess-ca 2>/dev/null || true
kubectl --context $k3d_context_name -n cert-manager delete Certificate ess-ca 2>/dev/null || true
kubectl --context $k3d_context_name -n cert-manager delete Secret ess-ca 2>/dev/null || true
kubectl --context $k3d_context_name -n cert-manager create secret generic ess-ca \
--type=kubernetes.io/tls \
--from-file=tls.crt="$ca_folder"/ca.crt \
--from-file=tls.key="$ca_folder"/ca.pem \
--from-file=ca.crt="$ca_folder"/ca.crt
fi
cat <<EOF | kubectl --context $kind_context_name apply -f -
cat <<EOF | kubectl --context $k3d_context_name apply -f -
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
@@ -103,15 +85,15 @@ spec:
EOF
if [[ ! -f "$ca_folder"/ca.crt || ! -f "$ca_folder"/ca.pem ]]; then
kubectl --context $kind_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['ca\.crt']}" | base64 -d > "$ca_folder"/ca.crt
kubectl --context $kind_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['tls\.key']}" | base64 -d > "$ca_folder"/ca.pem
kubectl --context $k3d_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['ca\.crt']}" | base64 -d > "$ca_folder"/ca.crt
kubectl --context $k3d_context_name -n cert-manager get secret ess-ca -o jsonpath="{.data['tls\.key']}" | base64 -d > "$ca_folder"/ca.pem
fi
for namespace in $ess_namespaces; do
echo "Constructing ESS dependencies in $namespace"
server_version=$(kubectl --context $kind_context_name version | grep Server | sed 's/.*v/v/' | awk -F. '{print $1"."$2}')
server_version=$(kubectl --context $k3d_context_name version | grep Server | sed 's/.*v/v/' | awk -F. '{print $1"."$2}')
# We don't turn on enforce here as people may be experimenting but we do turn on warn so people see the warnings when helm install/upgrade
cat <<EOF | kubectl --context $kind_context_name apply -f -
cat <<EOF | kubectl --context $k3d_context_name apply -f -
apiVersion: v1
kind: Namespace
metadata:

View file

@@ -4,7 +4,7 @@
# SPDX-License-Identifier: AGPL-3.0-only
from .ca import delegated_ca, root_ca, ssl_context
from .cluster import cluster, ess_namespace, helm_client, ingress, kube_client, prometheus_operator_crds, registry
from .cluster import cluster, ess_namespace, helm_client, ingress, kube_client, prometheus_operator_crds
from .data import ESSData, generated_data
from .helm import helm_prerequisites, ingress_ready, matrix_stack, secrets_generated
from .matrix_tools import build_matrix_tools, loaded_matrix_tools
@@ -25,7 +25,6 @@ __all__ = [
"loaded_matrix_tools",
"matrix_stack",
"prometheus_operator_crds",
"registry",
"root_ca",
"secrets_generated",
"ssl_context",

View file

@@ -7,25 +7,26 @@ import asyncio
import os
from pathlib import Path
import httpx
import httpx_retries
import pyhelm3
import pytest
import yaml
from lightkube import ApiError, AsyncClient, KubeConfig
from lightkube.config.client_adapter import verify_cluster
from lightkube.models.meta_v1 import ObjectMeta
from lightkube.resources.core_v1 import Namespace, Service
from pytest_kubernetes.options import ClusterOptions
from pytest_kubernetes.providers import KindManagerBase
from python_on_whales import docker
from pytest_kubernetes.providers import K3dManagerBase
from .data import ESSData
class PotentiallyExistingKindCluster(KindManagerBase):
class PotentiallyExistingK3dCluster(K3dManagerBase):
def __init__(self, cluster_name, provider_config=None):
super().__init__(cluster_name, provider_config)
clusters = self._exec(["get", "clusters"])
if cluster_name in clusters.stdout.decode("utf-8").split("\n"):
clusters = self._exec(["cluster", "list"])
if any([line.startswith(cluster_name + " ") for line in clusters.stdout.decode("utf-8").split("\n")]):
self.existing_cluster = True
else:
self.existing_cluster = False
@@ -34,11 +35,10 @@ class PotentiallyExistingKindCluster(KindManagerBase):
if self.existing_cluster:
self._exec(
[
"export",
"kubeconfig",
"--name",
"print",
self.cluster_name,
"--kubeconfig ",
">",
str(cluster_options.kubeconfig_path),
]
)
@@ -64,9 +64,9 @@ class PotentiallyExistingKindCluster(KindManagerBase):
@pytest.fixture(autouse=True, scope="session")
async def cluster():
# Both these names must match what `setup_test_cluster.sh` would create
this_cluster = PotentiallyExistingKindCluster("ess-helm")
this_cluster = PotentiallyExistingK3dCluster("ess-helm")
this_cluster.create(
ClusterOptions(cluster_name="ess-helm", provider_config=Path(__file__).parent / Path("files/clusters/kind.yml"))
ClusterOptions(cluster_name="ess-helm", provider_config=Path(__file__).parent / Path("files/clusters/k3d.yml"))
)
yield this_cluster
@@ -82,78 +82,39 @@ async def helm_client(cluster):
@pytest.fixture(scope="session")
async def kube_client(cluster):
kube_config = KubeConfig.from_file(cluster.kubeconfig)
return AsyncClient(config=kube_config)
config = kube_config.get()
@pytest.fixture(autouse=True, scope="session")
async def ingress(cluster, kube_client, helm_client: pyhelm3.Client):
chart = await helm_client.get_chart("ingress-nginx", repo="https://kubernetes.github.io/ingress-nginx")
values_file = Path(__file__).parent.resolve() / Path("files/charts/ingress-nginx.yml")
# Install or upgrade a release
await helm_client.install_or_upgrade_release(
"ingress-nginx",
chart,
yaml.safe_load(values_file.read_text("utf-8")),
namespace="ingress-nginx",
create_namespace=True,
atomic=True,
wait=True,
# We've seen 429 errors when storage is (re)initializing. Let's retry those
ssl_context = verify_cluster(config.cluster, config.user, config.abs_file)
wrapped_transport = httpx.AsyncHTTPTransport(verify=ssl_context)
transport = httpx_retries.RetryTransport(
transport=wrapped_transport, retry=httpx_retries.Retry(status_forcelist=[429])
)
await asyncio.to_thread(
cluster.wait,
name="endpoints/ingress-nginx-controller-admission",
waitfor="jsonpath='{.subsets[].addresses}'",
namespace="ingress-nginx",
)
await asyncio.to_thread(
cluster.wait,
name="lease/ingress-nginx-leader",
waitfor="jsonpath='{.spec.holderIdentity}'",
namespace="ingress-nginx",
)
return (await kube_client.get(Service, name="ingress-nginx-controller", namespace="ingress-nginx")).spec.clusterIP
return AsyncClient(config=kube_config, transport=transport)
@pytest.fixture(autouse=True, scope="session")
async def registry(cluster):
pytest_registry_container_name = "pytest-ess-helm-registry"
test_cluster_registry_container_name = "ess-helm-registry"
# We have a registry created by `setup_test_cluster.sh`
if docker.container.exists(test_cluster_registry_container_name):
container_name = test_cluster_registry_container_name
# We have a registry created by a previous run of pytest
elif docker.container.exists(pytest_registry_container_name):
container_name = pytest_registry_container_name
# We have no registry, create one
else:
container_name = pytest_registry_container_name
container = docker.run(
name=container_name,
image="registry:2",
publish=[("127.0.0.1:5000", "5000")],
restart="always",
detach=True,
)
container = docker.container.inspect(container_name)
if not container.state.running:
container.start()
kind_network = docker.network.inspect("kind")
if container.id not in kind_network.containers:
docker.network.connect(kind_network, container, alias="registry")
yield
if container_name == pytest_registry_container_name:
container.stop()
container.remove()
@pytest.fixture(scope="session")
async def ingress(cluster, kube_client):
attempt = 0
while attempt < 120:
try:
# We can't just kubectl wait as that doesn't work with non-existent objects
# This can be set up before the LB port is accessible externally, so we do it afterwards
service = await kube_client.get(Service, name="traefik", namespace="kube-system")
await asyncio.to_thread(
cluster.wait,
name="service/traefik",
waitfor="jsonpath='{.status.loadBalancer.ingress[0].ip}'",
namespace="kube-system",
)
return service.spec.clusterIP
except ApiError:
await asyncio.sleep(1)
attempt += 1
raise Exception("Couldn't fetch Trafeik Service IP afrter 120s")
@pytest.fixture(autouse=True, scope="session")
@pytest.fixture(scope="session")
async def prometheus_operator_crds(helm_client):
if os.environ.get("SKIP_SERVICE_MONITORS_CRDS", "false") == "false":
chart = await helm_client.get_chart(
@@ -173,7 +134,7 @@ async def prometheus_operator_crds(helm_client):
@pytest.fixture(scope="session")
async def ess_namespace(cluster: PotentiallyExistingKindCluster, kube_client: AsyncClient, generated_data: ESSData):
async def ess_namespace(cluster: PotentiallyExistingK3dCluster, kube_client: AsyncClient, generated_data: ESSData):
(major_version, minor_version) = cluster.version()
try:
await kube_client.get(Namespace, name=generated_data.ess_namespace)

View file

@@ -1,20 +0,0 @@
# Copyright 2024 New Vector Ltd
# Copyright 2025 Element Creations Ltd
#
# SPDX-License-Identifier: AGPL-3.0-only
controller:
ingressClassResource:
default: true
config:
hsts: false
hostPort:
enabled: true
allowSnippetAnnotations: true
service:
type: ClusterIP
enabled: true

View file

@@ -1,7 +0,0 @@
# Copyright 2024 New Vector Ltd
# Copyright 2025 Element Creations Ltd
#
# SPDX-License-Identifier: AGPL-3.0-only
args:
- --kubelet-insecure-tls

View file

@@ -0,0 +1,57 @@
# Copyright 2024-2025 New Vector Ltd
# Copyright 2025 Element Creations Ltd
#
# SPDX-License-Identifier: AGPL-3.0-only
apiVersion: k3d.io/v1alpha5
kind: Simple
metadata:
name: ess-helm
# Until k3d comes with a recent k8s/k3s version
image: rancher/k3s:v1.34.2-k3s1
agents: 0
servers: 1
ports:
- port: 127.0.0.1:80:80
nodeFilters: [loadbalancer]
- port: 127.0.0.1:443:443
nodeFilters: [loadbalancer]
# Matrix RTC SFU TCP and Muxed UDP
- port: 127.0.0.1:30881:30881
nodeFilters: [loadbalancer]
- port: 127.0.0.1:30882:30882/UDP
nodeFilters: [loadbalancer]
options:
k3d:
wait: true
k3s:
extraArgs:
- arg: "--kubelet-arg=container-log-max-size=100Mi"
nodeFilters: [servers:*]
- arg: "--kubelet-arg=container-log-max-files=10"
nodeFilters: [servers:*]
- arg: "--kube-apiserver-arg=audit-log-path=/var/log/kubernetes/kube-apiserver-audit.log"
nodeFilters: [servers:*]
- arg: "--kube-apiserver-arg=audit-policy-file=/etc/kubernetes/policies/audit-policy.yaml"
nodeFilters: [servers:*]
runtime:
ulimits:
- name: nofile
soft: 100000
hard: 100000
files:
- source: audit-policy.yml
destination: /etc/kubernetes/policies/audit-policy.yaml
nodeFilters: [servers:*]
registries:
create:
name: k3d-ess-helm-registry
host: 127.0.0.1
hostPort: "5000"
# The k3d-created registry will be available at localhost:5000 externally and so that's what we'll tag images with
# The below makes it available internally at the same address
config: |
mirrors:
"localhost:5000":
endpoint:
- http://k3d-ess-helm-registry:5000
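As the comments note, the k3d-created registry is reachable as localhost:5000 on the host and mirrored to k3d-ess-helm-registry:5000 inside the cluster. A hedged sketch of how an image could be pushed and then pulled by a pod; the image name and tag are hypothetical:

docker tag matrix-tools:latest localhost:5000/matrix-tools:latest
docker push localhost:5000/matrix-tools:latest
# Pods use the same reference; containerd resolves it through the mirror configured above
kubectl --context k3d-ess-helm run registry-smoke --image=localhost:5000/matrix-tools:latest --restart=Never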

View file

@@ -1,65 +0,0 @@
# Copyright 2024-2025 New Vector Ltd
# Copyright 2025 Element Creations Ltd
#
# SPDX-License-Identifier: AGPL-3.0-only
apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
name: ess-helm
nodes:
- role: control-plane
kubeadmConfigPatches:
- |-
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
- |-
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerLogMaxSize: 100Mi
containerLogMaxFiles: 10
- |
kind: ClusterConfiguration
apiServer:
# enable auditing flags on the API server
extraArgs:
audit-log-path: /var/log/kubernetes/kube-apiserver-audit.log
audit-policy-file: /etc/kubernetes/policies/audit-policy.yaml
# mount new files / directories on the control plane
extraVolumes:
- name: audit-policies
hostPath: /etc/kubernetes/policies
mountPath: /etc/kubernetes/policies
readOnly: true
pathType: "DirectoryOrCreate"
- name: "audit-logs"
hostPath: "/var/log/kubernetes"
mountPath: "/var/log/kubernetes"
readOnly: false
pathType: DirectoryOrCreate
extraMounts:
- hostPath: ./audit-policy.yml
containerPath: /etc/kubernetes/policies/audit-policy.yaml
readOnly: true
- hostPath: ./local_mirrors
containerPath: /etc/containerd/certs.d
readOnly: true
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP
# Matrix RTC SFU TCP and Muxed UDP
- containerPort: 30881
hostPort: 30881
protocol: TCP
- containerPort: 30882
hostPort: 30882
protocol: UDP
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"

View file

@@ -1,7 +0,0 @@
# Copyright 2025 New Vector Ltd
# Copyright 2025 Element Creations Ltd
#
# SPDX-License-Identifier: AGPL-3.0-only
[host."http://registry:5000"]
capabilities = ["pull", "resolve"]

View file

@@ -7,6 +7,7 @@ import asyncio
import base64
import os
from collections.abc import Awaitable
from ssl import SSLCertVerificationError, SSLContext
import pyhelm3
import pytest
@@ -155,6 +156,7 @@ async def matrix_stack(
helm_client: pyhelm3.Client,
ingress,
helm_prerequisites,
prometheus_operator_crds,
ess_namespace: Namespace,
generated_data: ESSData,
loaded_matrix_tools: dict,
@@ -197,7 +199,7 @@ async def matrix_stack(
@pytest.fixture(scope="session")
def ingress_ready(cluster, kube_client: AsyncClient, matrix_stack, generated_data: ESSData):
def ingress_ready(cluster, kube_client: AsyncClient, matrix_stack, generated_data: ESSData, ssl_context: SSLContext):
async def _ingress_ready(ingress_suffix):
await asyncio.to_thread(
cluster.wait,
@@ -215,6 +217,27 @@ def ingress_ready(cluster, kube_client: AsyncClient, matrix_stack, generated_dat
)
await wait_for_endpoint_ready(service.metadata.name, generated_data.ess_namespace, cluster, kube_client)
if rule.host:
attempt = 0
while attempt < 20:
try:
# Wait for the port and certificate to be available
_, writer = await asyncio.wait_for(
asyncio.open_connection("127.0.0.1", 443, ssl=ssl_context, server_hostname=rule.host),
timeout=1,
)
writer.close()
await writer.wait_closed()
break
except (ConnectionResetError, ConnectionRefusedError, SSLCertVerificationError, TimeoutError):
await asyncio.sleep(1)
attempt += 1
else:
raise Exception(
f"Unable to connect to Ingress/{generated_data.release_name}-{ingress_suffix}"
" externally after 20s"
)
return _ingress_ready
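In shell terms, the readiness loop added above is roughly equivalent to polling the k3d-published port 443 with SNI until the TLS handshake verifies; a sketch only, using an illustrative hostname and the test CA written to .ca/ca.crt:

until echo | openssl s_client -connect 127.0.0.1:443 -servername synapse.ess.localhost \
    -CAfile .ca/ca.crt -verify_return_error > /dev/null 2>&1; do
  sleep 1  # retry until the port is published and the certificate validates
done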

View file

@@ -25,7 +25,7 @@ async def build_matrix_tools():
@pytest.fixture(autouse=True, scope="session")
async def loaded_matrix_tools(registry, build_matrix_tools: Image):
async def loaded_matrix_tools(cluster, build_matrix_tools: Image):
# Until the image is made publicly available
# In local runs we always have to build it
if os.environ.get("BUILD_MATRIX_TOOLS"):