~/src/github.com/openstack-k8s-operators/install_yamls ~/ci-framework-data/artifacts
error: the server doesn't have a resource type "openstackversion"
bash scripts/validate-marketplace.sh
+ '[' -z 500s ']'
+ OPERATOR_NAMESPACE=openshift-marketplace
++ grep -viE 'running|completed'
++ oc get pods --no-headers -n openshift-marketplace
+ not_running_pods=
+ '[' -z '' ']'
+ echo 'All openshift-marketplace pods seems to me fine'
All openshift-marketplace pods seems to me fine
+ OPERATORS='openshift-cert-manager-operator kubernetes-nmstate-operator metallb-operator'
+ for operator in $OPERATORS
+ n=0
+ retries=20
+ true
+ oc get packagemanifests -n openshift-marketplace
+ grep openshift-cert-manager-operator
openshift-cert-manager-operator   Red Hat Operators   343d
+ '[' 0 -eq 0 ']'
+ break
+ for operator in $OPERATORS
+ n=0
+ retries=20
+ true
+ grep kubernetes-nmstate-operator
+ oc get packagemanifests -n openshift-marketplace
kubernetes-nmstate-operator   Red Hat Operators   343d
+ '[' 0 -eq 0 ']'
+ break
+ for operator in $OPERATORS
+ n=0
+ retries=20
+ true
+ grep metallb-operator
+ oc get packagemanifests -n openshift-marketplace
metallb-operator   Red Hat Operators   343d
+ '[' 0 -eq 0 ']'
+ break
bash scripts/gen-namespace.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests ']'
+ '[' -z metallb-system ']'
+ OUT_DIR=/home/zuul/ci-framework-data/artifacts/manifests/metallb-system
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/metallb-system ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/metallb-system
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/namespace.yaml
namespace/metallb-system created
timeout 500s bash -c "while ! (oc get project.v1.project.openshift.io metallb-system); do sleep 1; done"
NAME             DISPLAY NAME   STATUS
metallb-system                  Active
bash scripts/gen-olm-metallb.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/metallb/op ']'
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/metallb/op ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/metallb/op
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/metallb/cr ']'
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/metallb/cr ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/metallb/cr
+ echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/metallb/op
OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/metallb/op
+ echo DEPLOY_DIR /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/metallb/cr
DEPLOY_DIR /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/metallb/cr
+ echo INTERFACE
INTERFACE
+ cat
+ cat
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/metallb/op
operatorgroup.operators.coreos.com/metallb-operator created
subscription.operators.coreos.com/metallb-operator-sub created
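The three bare "+ cat" traces above are heredocs in gen-olm-metallb.sh writing the OLM manifests that the oc apply then consumes. A minimal sketch of their shape, assuming the stock OperatorGroup/Subscription layout; the channel and catalog source named here are illustrative, not read from this log:

    # Sketch only: field values below are assumptions, not taken from the job.
    cat > "${OPERATOR_DIR}/operatorgroup.yaml" <<EOF
    apiVersion: operators.coreos.com/v1
    kind: OperatorGroup
    metadata:
      name: metallb-operator
      namespace: metallb-system
    EOF
    cat > "${OPERATOR_DIR}/subscription.yaml" <<EOF
    apiVersion: operators.coreos.com/v1alpha1
    kind: Subscription
    metadata:
      name: metallb-operator-sub
      namespace: metallb-system
    spec:
      channel: stable                            # illustrative
      name: metallb-operator
      source: redhat-operators                   # illustrative
      sourceNamespace: openshift-marketplace
    EOF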
timeout 500s bash -c "while ! (oc get pod --no-headers=true -l control-plane=controller-manager -n metallb-system| grep metallb-operator-controller); do sleep 10; done"
No resources found in metallb-system namespace.
No resources found in metallb-system namespace.
No resources found in metallb-system namespace.
metallb-operator-controller-manager-56dbb5cfb5-ls84h   0/1   ContainerCreating   0   1s
oc wait pod -n metallb-system --for condition=Ready -l control-plane=controller-manager --timeout=500s
pod/metallb-operator-controller-manager-56dbb5cfb5-ls84h condition met
timeout 500s bash -c "while ! (oc get pod --no-headers=true -l component=webhook-server -n metallb-system| grep metallb-operator-webhook); do sleep 10; done"
metallb-operator-webhook-server-58b8447d8-56lmr   1/1   Running   0   40s
oc wait pod -n metallb-system --for condition=Ready -l component=webhook-server --timeout=500s
pod/metallb-operator-webhook-server-58b8447d8-56lmr condition met
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/metallb-system/metallb/cr/deploy_operator.yaml
metallb.metallb.io/metallb created
timeout 500s bash -c "while ! (oc get pod --no-headers=true -l component=speaker -n metallb-system | grep speaker); do sleep 10; done"
No resources found in metallb-system namespace.
speaker-8s85p   1/2   Running   0   10s
oc wait pod -n metallb-system -l component=speaker --for condition=Ready --timeout=500s
pod/speaker-8s85p condition met
bash scripts/gen-namespace.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests ']'
+ '[' -z swift-kuttl-tests ']'
+ OUT_DIR=/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/namespace.yaml
namespace/swift-kuttl-tests created
timeout 500s bash -c "while ! (oc get project.v1.project.openshift.io swift-kuttl-tests); do sleep 1; done"
NAME                DISPLAY NAME   STATUS
swift-kuttl-tests                  Active
oc project swift-kuttl-tests
Now using project "swift-kuttl-tests" on server "https://api.crc.testing:6443".
bash scripts/gen-input-kustomize.sh
+ OUT=/home/zuul/ci-framework-data/artifacts/manifests
+ '[' -z swift-kuttl-tests ']'
+ '[' -z osp-secret ']'
+ '[' -z 12345678 ']'
+ '[' -z 1234567842 ']'
+ '[' -z 767c3ed056cbaa3b9dfedb8c6f825bf0 ']'
+ '[' -z sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= ']'
+ '[' -z COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f ']'
+ '[' -z openstack ']'
+ '[' -z libvirt-secret ']'
+ DIR=/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/input
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/input ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/input
+ pushd /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/input
~/ci-framework-data/artifacts/manifests/swift-kuttl-tests/input ~/src/github.com/openstack-k8s-operators/install_yamls
+ cat
oc get secret/osp-secret || oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/input | oc apply -f -
Error from server (NotFound): secrets "osp-secret" not found
secret/libvirt-secret created
secret/octavia-ca-passphrase created
secret/osp-secret created
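The "+ cat" in gen-input-kustomize.sh writes a kustomization.yaml into the input/ directory, and the three secrets above come out of its generator when the directory is piped through oc kustomize. A minimal sketch, assuming a kustomize secretGenerator; only the secret names and the masked password are visible in this log, so the keys below are illustrative:

    # Hypothetical reconstruction of input/kustomization.yaml.
    cat > kustomization.yaml <<EOF
    apiVersion: kustomize.config.k8s.io/v1beta1
    kind: Kustomization
    namespace: swift-kuttl-tests
    secretGenerator:
    - name: osp-secret
      literals:
      - AdminPassword=...        # illustrative key; masked as 12**********78 elsewhere in the log
    - name: libvirt-secret
      literals:
      - LibvirtPassword=...      # illustrative key
    generatorOptions:
      disableNameSuffixHash: true  # assumed, since the secrets keep their exact names
    EOF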
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/manila/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/manila/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/manila: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/manila-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/manila/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists manila;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/heat/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/heat/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/heat: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/heat-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/heat/cr
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/horizon/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/horizon/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/horizon: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/horizon-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/horizon/cr
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/nova/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/nova/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/nova: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/nova-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/nova/cr
oc rsh openstack-galera-0 mysql -u root --password=12**********78 -ss -e "show databases like 'nova_%';" | xargs -I '{}' oc rsh openstack-galera-0 mysql -u root --password=12**********78 -ss -e "flush tables; drop database if exists {};"
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra-redis/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra-redis/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra-redis: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator-redis /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra-redis/cr
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/octavia/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/octavia/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/octavia: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/octavia-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/octavia/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists octavia;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/designate/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/designate/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/designate: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/designate-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/designate/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists designate;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/neutron/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/neutron/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/neutron: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/neutron-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/neutron/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists neutron;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ovn/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ovn/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ovn: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/ovn-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ovn/cr
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ironic/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ironic/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ironic: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/ironic-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/ironic/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists ironic;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists ironic_inspector;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/cinder/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/cinder/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/cinder: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/cinder-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/cinder/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists cinder;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/glance/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/glance/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/glance: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/glance-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/glance/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists glance;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/placement/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/placement/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/placement: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/placement-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/placement/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists placement;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/swift/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/swift/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/swift: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/swift/cr
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists barbican;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists keystone;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/telemetry/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/telemetry/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/telemetry: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/telemetry-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/telemetry/cr
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/ceilometer-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/telemetry/cr
oc rsh -t openstack-galera-0 mysql -u root --password=12**********78 -e "flush tables; drop database if exists aodh;" || true
Error from server (NotFound): pods "openstack-galera-0" not found
oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr | oc delete --ignore-not-found=true -f -
error: must build at directory: not a valid directory: evalsymlink failure on '/home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr' : lstat /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra: no such file or directory
No resources found
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr
if oc get RabbitmqCluster; then oc delete --ignore-not-found=true RabbitmqCluster --all; fi
error: the server doesn't have a resource type "RabbitmqCluster"
rm -Rf /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr
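Every service block above is the same Makefile cleanup pattern, and on this fresh cluster each step is an expected no-op: kustomize fails because the per-service cr/ directory was never generated, and the galera pod does not exist yet, so the database drops are skipped. A minimal sketch of the pattern as it appears in the log (the variable names are illustrative, not the Makefile's):

    # Illustrative per-service cleanup; each step tolerates "already gone".
    oc kustomize "${DEPLOY_DIR}/${SERVICE}/cr" | oc delete --ignore-not-found=true -f - || true
    rm -Rf "${OPERATOR_BASE_DIR}/${SERVICE}-operator" "${DEPLOY_DIR}/${SERVICE}/cr"
    oc rsh -t openstack-galera-0 mysql -u root --password="${DB_ROOT_PASSWORD}" \
      -e "flush tables; drop database if exists ${SERVICE};" || true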
bash scripts/gen-namespace.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests ']'
+ '[' -z openstack-operators ']'
+ OUT_DIR=/home/zuul/ci-framework-data/artifacts/manifests/openstack-operators
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators ']'
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/namespace.yaml
Warning: resource namespaces/openstack-operators is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by oc apply. oc apply should only be used on resources created declaratively by either oc create --save-config or oc apply. The missing annotation will be patched automatically.
namespace/openstack-operators configured
timeout 500s bash -c "while ! (oc get project.v1.project.openshift.io openstack-operators); do sleep 1; done"
NAME                  DISPLAY NAME   STATUS
openstack-operators                  Active
oc project openstack-operators
Now using project "openstack-operators" on server "https://api.crc.testing:6443".
bash scripts/gen-olm.sh
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' -z quay.io/openstack-k8s-operators/mariadb-operator-index:latest ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op ']'
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op
+ OPERATOR_CHANNEL=alpha
+ OPERATOR_SOURCE=mariadb-operator-index
+ OPERATOR_SOURCE_NAMESPACE=openstack-operators
+ echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op
OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op
+ echo OPERATOR_CHANNEL alpha
OPERATOR_CHANNEL alpha
+ echo OPERATOR_SOURCE mariadb-operator-index
OPERATOR_SOURCE mariadb-operator-index
+ echo OPERATOR_SOURCE_NAMESPACE openstack-operators
OPERATOR_SOURCE_NAMESPACE openstack-operators
+ cat
+ cat
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op
catalogsource.operators.coreos.com/mariadb-operator-index created
operatorgroup.operators.coreos.com/openstack created
subscription.operators.coreos.com/mariadb-operator created
mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/operator /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/mariadb/op /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr
bash scripts/clone-operator-repo.sh
Cloning repo: git clone -b main https://github.com/openstack-k8s-operators/openstack-operator.git openstack-operator
Cloning into 'openstack-operator'...
Cloning repo: git clone -b main https://github.com/openstack-k8s-operators/mariadb-operator.git mariadb-operator
Cloning into 'mariadb-operator'...
Running checkout: git checkout ff95971883bb
Note: switching to 'ff95971883bb'.
You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by switching back to a branch.

If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -c with the switch command. Example:

  git switch -c <new-branch-name>

Or undo this operation with:

  git switch -

Turn off this advice by setting config variable advice.detachedHead to false

HEAD is now at ff95971 Merge pull request #400 from dciabrin/galera-restore
cp /home/zuul/ci-framework-data/artifacts/manifests/operator/mariadb-operator/config/samples/mariadb_v1beta1_galera.yaml /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr
bash scripts/gen-service-kustomize.sh
+++ dirname scripts/gen-service-kustomize.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/common.sh --source-only
++ set -e
+ '[' -z swift-kuttl-tests ']'
+ '[' -z Galera ']'
+ '[' -z osp-secret ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr ']'
+ '[' -n '' ']'
+ REPLACEMENTS=
+ IMAGE=unused
+ IMAGE_PATH=containerImage
+ STORAGE_REQUEST=10G
+ INTERFACE_MTU=1500
+ VLAN_START=20
+ VLAN_STEP=1
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr ']'
+ pushd /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr
~/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr ~/src/github.com/openstack-k8s-operators/install_yamls
+ cat
+ [[ Galera == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ IFS=,
+ read -ra IMAGES
+ IFS=,
+ read -ra IMAGE_PATHS
+ '[' 1 '!=' 1 ']'
+ (( i=0 ))
+ (( i < 1 ))
+ SPEC_PATH=containerImage
+ SPEC_VALUE=unused
+ '[' unused '!=' unused ']'
+ (( i++ ))
+ (( i < 1 ))
+ '[' -n '' ']'
+ '[' Galera == OpenStackControlPlane ']'
+ '[' Galera == Galera ']'
+ cat
+ '[' Galera == NetConfig ']'
+ '[' -n '' ']'
+ [[ Galera == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ [[ Galera == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ '[' -n '' ']'
+ kustomization_add_resources
+ echo merge config dir
merge config dir
++ grep -v kustomization
++ find . -type f -name '*.yaml'
+ yamls=./mariadb_v1beta1_galera.yaml
+ for y in ${yamls[@]}
+ kustomize edit add resource ./mariadb_v1beta1_galera.yaml
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
make wait
make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
error: the server doesn't have a resource type "openstackversion"
bash scripts/operator-wait.sh
+ TIMEOUT=500s
+++ dirname scripts/operator-wait.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq ']'
+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
~/src/github.com/openstack-k8s-operators/install_yamls/scripts ~/src/github.com/openstack-k8s-operators/install_yamls
+ timeout 500s bash -c 'until [ "$(bash ./get-operator-status.sh)" == "Succeeded" ]; do sleep 5; done'
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "mariadb-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "mariadb-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "mariadb-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "mariadb-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "mariadb-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "mariadb-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=null
+ '[' null '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=null
+ '[' null '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z mariadb ']'
+ '[' mariadb = rabbitmq-cluster ']'
+ DEPL_NAME=mariadb-operator-controller-manager
++ oc get -n openstack-operators deployment mariadb-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=1
+ '[' 1 '!=' 1 ']'
+ echo Succeeded
+ exit 0
+ rc=0
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
+ exit 0
make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
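The make wait above polls get-operator-status.sh every 5 seconds until it prints Succeeded. The three states visible in the trace map to the operator Deployment's lifecycle: NotFound (OLM has not created the Deployment yet), availableReplicas null (Deployment created but no pod ready), and 1 (controller up). A sketch of the status check as reconstructed from the trace; the real script may differ in detail:

    # Reconstructed from the trace above, not copied from the repo.
    DEPL_NAME=${OPERATOR_NAME}-operator-controller-manager
    REPLICAS=$(oc get -n "${OPERATOR_NAMESPACE}" deployment "${DEPL_NAME}" -o json | jq -e .status.availableReplicas)
    if [ "${REPLICAS}" != "1" ]; then
        exit 1    # the caller's until-loop sleeps 5s and re-runs this script
    fi
    echo Succeeded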
bash scripts/operator-deploy-resources.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr ']'
+ NEXT_WAIT_TIME=0
+ '[' 0 -eq 15 ']'
+ oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/mariadb/cr
+ oc apply -f -
galera.mariadb.openstack.org/openstack created
+ '[' 0 -lt 15 ']'
bash scripts/gen-olm.sh
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' -z quay.io/openstack-k8s-operators/infra-operator-index:latest ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op ']'
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op
+ OPERATOR_CHANNEL=alpha
+ OPERATOR_SOURCE=infra-operator-index
+ OPERATOR_SOURCE_NAMESPACE=openstack-operators
+ echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op
OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op
+ echo OPERATOR_CHANNEL alpha
OPERATOR_CHANNEL alpha
+ echo OPERATOR_SOURCE infra-operator-index
OPERATOR_SOURCE infra-operator-index
+ echo OPERATOR_SOURCE_NAMESPACE openstack-operators
OPERATOR_SOURCE_NAMESPACE openstack-operators
+ cat
+ cat
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op
catalogsource.operators.coreos.com/infra-operator-index created
operatorgroup.operators.coreos.com/openstack unchanged
subscription.operators.coreos.com/infra-operator created
mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/operator /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/infra/op /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr
bash scripts/clone-operator-repo.sh
Cloning repo: git clone -b main https://github.com/openstack-k8s-operators/infra-operator.git infra-operator
Cloning into 'infra-operator'...
Running checkout: git checkout 03b808364e4a
Note: switching to '03b808364e4a'.

You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by switching back to a branch.

If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -c with the switch command. Example:

  git switch -c <new-branch-name>

Or undo this operation with:

  git switch -

Turn off this advice by setting config variable advice.detachedHead to false

HEAD is now at 03b8083 Merge pull request #527 from lmiccini/block_rmquser_cleanup
cp /home/zuul/ci-framework-data/artifacts/manifests/operator/infra-operator/config/samples/memcached_v1beta1_memcached.yaml /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr
bash scripts/gen-service-kustomize.sh
+++ dirname scripts/gen-service-kustomize.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/common.sh --source-only
++ set -e
+ '[' -z swift-kuttl-tests ']'
+ '[' -z Memcached ']'
+ '[' -z osp-secret ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr ']'
+ '[' -n '' ']'
+ REPLACEMENTS=
+ IMAGE=unused
+ IMAGE_PATH=containerImage
+ STORAGE_REQUEST=10G
+ INTERFACE_MTU=1500
+ VLAN_START=20
+ VLAN_STEP=1
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr ']'
+ pushd /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr
~/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr ~/src/github.com/openstack-k8s-operators/install_yamls
+ cat
+ [[ Memcached == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ IFS=,
+ read -ra IMAGES
+ IFS=,
+ read -ra IMAGE_PATHS
+ '[' 1 '!=' 1 ']'
+ (( i=0 ))
+ (( i < 1 ))
+ SPEC_PATH=containerImage
+ SPEC_VALUE=unused
+ '[' unused '!=' unused ']'
+ (( i++ ))
+ (( i < 1 ))
+ '[' -n memcached ']'
+ cat
+ '[' Memcached == OpenStackControlPlane ']'
+ '[' Memcached == Galera ']'
+ '[' Memcached == NetConfig ']'
+ '[' -n '' ']'
+ [[ Memcached == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ [[ Memcached == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ '[' -n '' ']'
+ kustomization_add_resources
+ echo merge config dir
merge config dir
++ find . -type f -name '*.yaml'
++ grep -v kustomization
+ yamls=./memcached_v1beta1_memcached.yaml
+ for y in ${yamls[@]}
+ kustomize edit add resource ./memcached_v1beta1_memcached.yaml
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
make wait
make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
error: the server doesn't have a resource type "openstackversion"
bash scripts/operator-wait.sh
+ TIMEOUT=500s
+++ dirname scripts/operator-wait.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq ']'
+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
~/src/github.com/openstack-k8s-operators/install_yamls/scripts ~/src/github.com/openstack-k8s-operators/install_yamls
+ timeout 500s bash -c 'until [ "$(bash ./get-operator-status.sh)" == "Succeeded" ]; do sleep 5; done'
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "infra-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "infra-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "infra-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "infra-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "infra-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "infra-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=null
+ '[' null '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=null
+ '[' null '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=null
+ '[' null '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=null
+ '[' null '!=' 1 ']'
+ exit 1
+ '[' -z openstack-operators ']'
+ '[' -z infra ']'
+ '[' infra = rabbitmq-cluster ']'
+ DEPL_NAME=infra-operator-controller-manager
++ oc get -n openstack-operators deployment infra-operator-controller-manager -o json
++ jq -e .status.availableReplicas
+ REPLICAS=1
+ '[' 1 '!=' 1 ']'
+ echo Succeeded
+ exit 0
+ rc=0
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
+ exit 0
make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
bash scripts/operator-deploy-resources.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr ']'
+ NEXT_WAIT_TIME=0
+ '[' 0 -eq 15 ']'
+ oc apply -f -
+ oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/infra/cr
memcached.memcached.openstack.org/memcached created
+ '[' 0 -lt 15 ']'
bash scripts/gen-olm.sh
+ '[' -z openstack-operators ']'
+ '[' -z rabbitmq-cluster ']'
+ '[' -z quay.io/openstack-k8s-operators/rabbitmq-cluster-operator-index:latest ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq-cluster/op ']'
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq-cluster/op ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq-cluster/op
+ OPERATOR_CHANNEL=alpha
+ OPERATOR_SOURCE=rabbitmq-cluster-operator-index
+ OPERATOR_SOURCE_NAMESPACE=openstack-operators
+ echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq-cluster/op
OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq-cluster/op
+ echo OPERATOR_CHANNEL alpha
OPERATOR_CHANNEL alpha
+ echo OPERATOR_SOURCE rabbitmq-cluster-operator-index
OPERATOR_SOURCE rabbitmq-cluster-operator-index
+ echo OPERATOR_SOURCE_NAMESPACE openstack-operators
OPERATOR_SOURCE_NAMESPACE openstack-operators
+ cat
+ cat
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq-cluster/op
catalogsource.operators.coreos.com/rabbitmq-cluster-operator-index created
operatorgroup.operators.coreos.com/openstack unchanged
subscription.operators.coreos.com/rabbitmq-cluster-operator created
mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/operator /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/rabbitmq/op /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr
bash -c "CHECKOUT_FROM_OPENSTACK_REF=false scripts/clone-operator-repo.sh"
Cloning repo: git clone -b patches https://github.com/openstack-k8s-operators/rabbitmq-cluster-operator.git rabbitmq-operator
Cloning into 'rabbitmq-operator'...
cp /home/zuul/ci-framework-data/artifacts/manifests/operator/rabbitmq-operator/docs/examples/default-security-context/rabbitmq.yaml /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr
bash scripts/gen-service-kustomize.sh
+++ dirname scripts/gen-service-kustomize.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/common.sh --source-only
++ set -e
+ '[' -z swift-kuttl-tests ']'
+ '[' -z RabbitmqCluster ']'
+ '[' -z osp-secret ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr ']'
+ '[' -n '' ']'
+ REPLACEMENTS=
+ IMAGE=unused
+ IMAGE_PATH=image
+ STORAGE_REQUEST=10G
+ INTERFACE_MTU=1500
+ VLAN_START=20
+ VLAN_STEP=1
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr ']'
+ pushd /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr
~/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr ~/src/github.com/openstack-k8s-operators/install_yamls
+ cat
+ [[ RabbitmqCluster == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ IFS=,
+ read -ra IMAGES
+ IFS=,
+ read -ra IMAGE_PATHS
+ '[' 1 '!=' 1 ']'
+ (( i=0 ))
+ (( i < 1 ))
+ SPEC_PATH=image
+ SPEC_VALUE=unused
+ '[' unused '!=' unused ']'
+ (( i++ ))
+ (( i < 1 ))
+ '[' -n rabbitmq ']'
+ cat
+ '[' RabbitmqCluster == OpenStackControlPlane ']'
+ '[' RabbitmqCluster == Galera ']'
+ '[' RabbitmqCluster == NetConfig ']'
+ '[' -n '' ']'
+ [[ RabbitmqCluster == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ [[ RabbitmqCluster == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]]
+ '[' -n '' ']'
+ kustomization_add_resources
+ echo merge config dir
merge config dir
++ find . -type f -name '*.yaml'
++ grep -v kustomization
+ yamls=./rabbitmq.yaml
+ for y in ${yamls[@]}
+ kustomize edit add resource ./rabbitmq.yaml
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
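This "merge config dir" sequence repeats for every service CR in the run (Galera, Memcached, RabbitmqCluster, and later KeystoneAPI and Barbican): the operator's sample CR is copied into the per-service cr/ directory and every YAML there is registered as a kustomize resource. A sketch of the helper as reconstructed from the traces; the real common.sh may differ:

    kustomization_add_resources() {
        echo merge config dir
        # pick up every YAML in the cr/ directory except kustomization.yaml itself
        yamls=$(find . -type f -name '*.yaml' | grep -v kustomization)
        for y in ${yamls[@]}; do
            kustomize edit add resource "$y"
        done
    }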
+ '[' 0 -eq 15 ']' + oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/rabbitmq/cr + oc apply -f - rabbitmqcluster.rabbitmq.com/rabbitmq created + '[' 0 -lt 15 ']' bash scripts/gen-olm.sh + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' -z quay.io/openstack-k8s-operators/keystone-operator-index:latest ']' + '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op ']' + '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op ']' + mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op + OPERATOR_CHANNEL=alpha + OPERATOR_SOURCE=keystone-operator-index + OPERATOR_SOURCE_NAMESPACE=openstack-operators + echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op + echo OPERATOR_CHANNEL alpha OPERATOR_CHANNEL alpha + echo OPERATOR_SOURCE keystone-operator-index OPERATOR_SOURCE keystone-operator-index + echo OPERATOR_SOURCE_NAMESPACE openstack-operators OPERATOR_SOURCE_NAMESPACE openstack-operators + cat + cat + cat oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op catalogsource.operators.coreos.com/keystone-operator-index created operatorgroup.operators.coreos.com/openstack unchanged subscription.operators.coreos.com/keystone-operator created mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/operator /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/keystone/op /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr bash scripts/clone-operator-repo.sh Cloning repo: git clone -b main https://github.com/openstack-k8s-operators/keystone-operator.git keystone-operator Cloning into 'keystone-operator'... Running checkout: git checkout 114b4c65a959 Note: switching to '114b4c65a959'. You are in 'detached HEAD' state. You can look around, make experimental changes and commit them, and you can discard any commits you make in this state without impacting any branches by switching back to a branch. If you want to create a new branch to retain commits you create, you may do so (now or later) by using -c with the switch command. Example: git switch -c Or undo this operation with: git switch - Turn off this advice by setting config variable advice.detachedHead to false HEAD is now at 114b4c6 Merge pull request #665 from vakwetu/simplify_client_call cp /home/zuul/ci-framework-data/artifacts/manifests/operator/keystone-operator/config/samples/keystone_v1beta1_keystoneapi.yaml /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr bash scripts/gen-service-kustomize.sh +++ dirname scripts/gen-service-kustomize.sh ++ cd scripts ++ pwd -P + SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts + . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/common.sh --source-only ++ set -e + '[' -z swift-kuttl-tests ']' + '[' -z KeystoneAPI ']' + '[' -z osp-secret ']' + '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr ']' + '[' -n '' ']' + REPLACEMENTS= + IMAGE=unused + IMAGE_PATH=containerImage + STORAGE_REQUEST=10G + INTERFACE_MTU=1500 + VLAN_START=20 + VLAN_STEP=1 + '[' '!' 
-d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr ']' + pushd /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr ~/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr ~/src/github.com/openstack-k8s-operators/install_yamls + cat + [[ KeystoneAPI == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]] + IFS=, + read -ra IMAGES + IFS=, + read -ra IMAGE_PATHS + '[' 1 '!=' 1 ']' + (( i=0 )) + (( i < 1 )) + SPEC_PATH=containerImage + SPEC_VALUE=unused + '[' unused '!=' unused ']' + (( i++ )) + (( i < 1 )) + '[' -n '' ']' + '[' KeystoneAPI == OpenStackControlPlane ']' + '[' KeystoneAPI == Galera ']' + '[' KeystoneAPI == NetConfig ']' + '[' -n '' ']' + [[ KeystoneAPI == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]] + [[ KeystoneAPI == \O\p\e\n\S\t\a\c\k\C\o\n\t\r\o\l\P\l\a\n\e ]] + '[' -n '' ']' + kustomization_add_resources + echo merge config dir merge config dir ++ find . -type f -name '*.yaml' ++ grep -v kustomization + yamls=./keystone_v1beta1_keystoneapi.yaml + for y in ${yamls[@]} + kustomize edit add resource ./keystone_v1beta1_keystoneapi.yaml + popd ~/src/github.com/openstack-k8s-operators/install_yamls make wait make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls' error: the server doesn't have a resource type "openstackversion" bash scripts/operator-wait.sh + TIMEOUT=500s +++ dirname scripts/operator-wait.sh ++ cd scripts ++ pwd -P + SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq ']' + pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts ~/src/github.com/openstack-k8s-operators/install_yamls/scripts ~/src/github.com/openstack-k8s-operators/install_yamls + timeout 500s bash -c 'until [ "$(bash ./get-operator-status.sh)" == "Succeeded" ]; do sleep 5; done' + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas Error from server (NotFound): deployments.apps "keystone-operator-controller-manager" not found + REPLICAS= + '[' '' '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas Error from server (NotFound): deployments.apps "keystone-operator-controller-manager" not found + REPLICAS= + '[' '' '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas Error from server (NotFound): deployments.apps "keystone-operator-controller-manager" not found + REPLICAS= + '[' '' '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas Error from server (NotFound): deployments.apps "keystone-operator-controller-manager" not found + REPLICAS= + '[' '' '!=' 1 ']' + exit 1 + 
'[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas Error from server (NotFound): deployments.apps "keystone-operator-controller-manager" not found + REPLICAS= + '[' '' '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas Error from server (NotFound): deployments.apps "keystone-operator-controller-manager" not found + REPLICAS= + '[' '' '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas + REPLICAS=null + '[' null '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas + REPLICAS=null + '[' null '!=' 1 ']' + exit 1 + '[' -z openstack-operators ']' + '[' -z keystone ']' + '[' keystone = rabbitmq-cluster ']' + DEPL_NAME=keystone-operator-controller-manager ++ oc get -n openstack-operators deployment keystone-operator-controller-manager -o json ++ jq -e .status.availableReplicas + REPLICAS=1 + '[' 1 '!=' 1 ']' + echo Succeeded + exit 0 + rc=0 + popd ~/src/github.com/openstack-k8s-operators/install_yamls + exit 0 make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls' bash scripts/operator-deploy-resources.sh + '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr ']' + NEXT_WAIT_TIME=0 + '[' 0 -eq 15 ']' + oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/keystone/cr + oc apply -f - keystoneapi.keystone.openstack.org/keystone created + '[' 0 -lt 15 ']' bash scripts/gen-olm.sh + '[' -z openstack-operators ']' + '[' -z barbican ']' + '[' -z quay.io/openstack-k8s-operators/barbican-operator-index:latest ']' + '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op ']' + '[' '!' 
-d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op ']' + mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op + OPERATOR_CHANNEL=alpha + OPERATOR_SOURCE=barbican-operator-index + OPERATOR_SOURCE_NAMESPACE=openstack-operators + echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op + echo OPERATOR_CHANNEL alpha OPERATOR_CHANNEL alpha + echo OPERATOR_SOURCE barbican-operator-index OPERATOR_SOURCE barbican-operator-index + echo OPERATOR_SOURCE_NAMESPACE openstack-operators OPERATOR_SOURCE_NAMESPACE openstack-operators + cat + cat + cat oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op catalogsource.operators.coreos.com/barbican-operator-index created operatorgroup.operators.coreos.com/openstack unchanged subscription.operators.coreos.com/barbican-operator created mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/operator /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/barbican/op /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr bash scripts/clone-operator-repo.sh Cloning repo: git clone -b main https://github.com/openstack-k8s-operators/barbican-operator.git barbican-operator Cloning into 'barbican-operator'... Running checkout: git checkout bd373daa8e8c Note: switching to 'bd373daa8e8c'. You are in 'detached HEAD' state. You can look around, make experimental changes and commit them, and you can discard any commits you make in this state without impacting any branches by switching back to a branch. If you want to create a new branch to retain commits you create, you may do so (now or later) by using -c with the switch command. Example: git switch -c Or undo this operation with: git switch - Turn off this advice by setting config variable advice.detachedHead to false HEAD is now at bd373da Merge pull request #328 from openstack-k8s-operators/renovate/github.com-onsi-ginkgo-v2-2.x cp /home/zuul/ci-framework-data/artifacts/manifests/operator/barbican-operator/config/samples/barbican_v1beta1_barbican.yaml /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr bash scripts/gen-service-kustomize.sh +++ dirname scripts/gen-service-kustomize.sh ++ cd scripts ++ pwd -P + SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts + . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/common.sh --source-only ++ set -e + '[' -z swift-kuttl-tests ']' + '[' -z Barbican ']' + '[' -z osp-secret ']' + '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr ']' + '[' -n '' ']' + REPLACEMENTS= + IMAGE=unused + IMAGE_PATH=containerImage + STORAGE_REQUEST=10G + INTERFACE_MTU=1500 + VLAN_START=20 + VLAN_STEP=1 + '[' '!' 
bash scripts/gen-service-kustomize.sh
+++ dirname scripts/gen-service-kustomize.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/common.sh --source-only
++ set -e
+ '[' -z swift-kuttl-tests ']'
+ '[' -z Barbican ']'
+ '[' -z osp-secret ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr ']'
+ '[' -n '' ']'
+ REPLACEMENTS=
+ IMAGE=unused
+ IMAGE_PATH=containerImage
+ STORAGE_REQUEST=10G
+ INTERFACE_MTU=1500
+ VLAN_START=20
+ VLAN_STEP=1
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr ']'
+ pushd /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr
~/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr ~/src/github.com/openstack-k8s-operators/install_yamls
+ cat
[... per-kind checks elided: Barbican matches none of OpenStackControlPlane, Galera or NetConfig, and containerImage stays at the placeholder "unused" ...]
+ kustomization_add_resources
+ echo merge config dir
merge config dir
++ find . -type f -name '*.yaml'
++ grep -v kustomization
+ yamls=./barbican_v1beta1_barbican.yaml
+ for y in ${yamls[@]}
+ kustomize edit add resource ./barbican_v1beta1_barbican.yaml
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
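The kustomization_add_resources helper from common.sh can be reconstructed almost verbatim from the trace: it registers every non-kustomization YAML in the CR directory as a kustomize resource. A sketch per the trace:

  # kustomization_add_resources, as seen in the trace above: add every
  # YAML in the current CR dir (except kustomization files) as a resource
  kustomization_add_resources() {
      echo "merge config dir"
      yamls=$(find . -type f -name '*.yaml' | grep -v kustomization)
      for y in ${yamls[@]}; do
          kustomize edit add resource "$y"
      done
  }

Running `oc kustomize` on the directory afterwards renders the sample CR with any edits applied.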
bash scripts/operator-deploy-resources.sh
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr ']'
+ NEXT_WAIT_TIME=0
+ '[' 0 -eq 15 ']'
+ oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr
+ oc apply -f -
error: resource mapping not found for name: "barbican" namespace: "swift-kuttl-tests" from "STDIN": no matches for kind "Barbican" in version "barbican.openstack.org/v1beta1"
ensure CRDs are installed first
+ sleep 0
[... the kustomize/apply attempt repeats with the same "no matches for kind" error through attempt 7, sleeping one second longer each time while OLM installs the CRD ...]
+ '[' 8 -eq 15 ']'
+ oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr
+ oc apply -f -
The Barbican "barbican" is invalid:
* spec.barbicanWorker.containerImage: Required value
* spec.barbicanKeystoneListener.containerImage: Required value
* spec.barbicanAPI.containerImage: Required value
+ sleep 8
+ '[' 9 -eq 15 ']'
[... attempt 9 fails with the same "Required value" errors: the CRD exists but the defaulting webhook is not yet serving ...]
+ '[' 10 -eq 15 ']'
+ oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr
+ oc apply -f -
Error from server (InternalError): error when creating "STDIN": Internal error occurred: failed calling webhook "mbarbican-v1beta1.kb.io": failed to call webhook: Post "https://barbican-operator-controller-manager-service.openstack-operators.svc:443/mutate-barbican-openstack-org-v1beta1-barbican?timeout=10s": no endpoints available for service "barbican-operator-controller-manager-service"
+ sleep 10
+ '[' 11 -eq 15 ']'
+ oc kustomize /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/barbican/cr
+ oc apply -f -
barbican.barbican.openstack.org/barbican created
+ '[' 11 -lt 15 ']'
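The retry behaviour traced above (sleep 0, 1, 2, ... capped at 15 attempts) is a linear back-off around kustomize-plus-apply. A minimal sketch of the loop in operator-deploy-resources.sh, reconstructed from the trace:

  # retry kustomize+apply with a linearly growing back-off; give up
  # after 15 attempts (reconstructed from the trace, details may differ)
  NEXT_WAIT_TIME=0
  until oc kustomize "${DEPLOY_DIR}" | oc apply -f -; do
      [ ${NEXT_WAIT_TIME} -eq 15 ] && exit 1
      sleep $(( NEXT_WAIT_TIME++ ))
  done

The loop deliberately tolerates the whole startup sequence seen here: missing CRD, then CRD present but webhook unvalidated defaults, then webhook service without endpoints, and finally success once the operator pod is ready.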
bash scripts/gen-olm.sh
+ '[' -z openstack-operators ']'
+ '[' -z swift ']'
+ '[' -z 38.102.83.38:5001/openstack-k8s-operators/swift-operator-index:540e2f342e5db014ead8bd6f97c1b2bca8622d97 ']'
+ '[' -z /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op ']'
+ '[' '!' -d /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op ']'
+ mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op
+ OPERATOR_CHANNEL=alpha
+ OPERATOR_SOURCE=swift-operator-index
+ OPERATOR_SOURCE_NAMESPACE=openstack-operators
+ echo OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op
OPERATOR_DIR /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op
+ echo OPERATOR_CHANNEL alpha
OPERATOR_CHANNEL alpha
+ echo OPERATOR_SOURCE swift-operator-index
OPERATOR_SOURCE swift-operator-index
+ echo OPERATOR_SOURCE_NAMESPACE openstack-operators
OPERATOR_SOURCE_NAMESPACE openstack-operators
+ cat
+ cat
+ cat
oc apply -f /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op
catalogsource.operators.coreos.com/swift-operator-index created
operatorgroup.operators.coreos.com/openstack unchanged
subscription.operators.coreos.com/swift-operator created
mkdir -p /home/zuul/ci-framework-data/artifacts/manifests/operator /home/zuul/ci-framework-data/artifacts/manifests/openstack-operators/swift/op /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/swift/cr
bash scripts/clone-operator-repo.sh
Cloning repo: git clone /home/zuul/src/github.com/openstack-k8s-operators/swift-operator swift-operator
Cloning into 'swift-operator'...
done.
cp /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/config/samples/swift_v1beta1_swift.yaml /home/zuul/ci-framework-data/artifacts/manifests/swift-kuttl-tests/swift/cr
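Note the two flavours of clone-operator-repo.sh seen so far: swift-operator is cloned from the local Zuul checkout (the change under test), while barbican-operator earlier was cloned from GitHub and pinned to an exact commit. The barbican pattern, as shown verbatim in its trace:

  # pinned clone, as in the barbican step above; detached HEAD is expected
  git clone -b main https://github.com/openstack-k8s-operators/barbican-operator.git barbican-operator
  cd barbican-operator
  git checkout bd373daa8e8c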
bash scripts/gen-service-kustomize.sh
[... same kustomize trace as for Barbican above, now with SERVICE=Swift ...]
+ kustomization_add_resources
+ echo merge config dir
merge config dir
++ find . -type f -name '*.yaml'
++ grep -v kustomization
+ yamls=./swift_v1beta1_swift.yaml
+ for y in ${yamls[@]}
+ kustomize edit add resource ./swift_v1beta1_swift.yaml
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
make wait
make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
error: the server doesn't have a resource type "openstackversion"
bash scripts/operator-wait.sh
+ TIMEOUT=500s
+++ dirname scripts/operator-wait.sh
++ cd scripts
++ pwd -P
+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
+ '[' -z openstack-operators ']'
+ '[' -z swift ']'
+ '[' swift = rabbitmq ']'
+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts
~/src/github.com/openstack-k8s-operators/install_yamls/scripts ~/src/github.com/openstack-k8s-operators/install_yamls
+ timeout 500s bash -c 'until [ "$(bash ./get-operator-status.sh)" == "Succeeded" ]; do sleep 5; done'
+ '[' -z openstack-operators ']'
+ '[' -z swift ']'
+ '[' swift = rabbitmq-cluster ']'
+ DEPL_NAME=swift-operator-controller-manager
++ oc get -n openstack-operators deployment swift-operator-controller-manager -o json
++ jq -e .status.availableReplicas
Error from server (NotFound): deployments.apps "swift-operator-controller-manager" not found
+ REPLICAS=
+ '[' '' '!=' 1 ']'
+ exit 1
[... the NotFound check repeats eight more times, then jq returns null twice, until the deployment reports one available replica ...]
+ REPLICAS=1
+ '[' 1 '!=' 1 ']'
+ echo Succeeded
+ exit 0
+ rc=0
+ popd
~/src/github.com/openstack-k8s-operators/install_yamls
+ exit 0
make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
make swift_kuttl_run
make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
error: the server doesn't have a resource type "openstackversion"
SWIFT_KUTTL_DIR=/home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests kubectl-kuttl test --config /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/kuttl-test.yaml /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests --namespace swift-kuttl-tests
2026/02/01 07:38:20 kutt-test config testdirs is overridden with args: [ /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests ]
=== RUN kuttl
harness.go:463: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://api.crc.testing:6443
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 600 seconds for each step
harness.go:375: testsuite: /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests has 5 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/basic-deploy
=== PAUSE kuttl/harness/basic-deploy
=== RUN kuttl/harness/basic-deploy_tls
=== PAUSE kuttl/harness/basic-deploy_tls
=== RUN kuttl/harness/basic-deploy_topology
=== PAUSE kuttl/harness/basic-deploy_topology
=== RUN kuttl/harness/customization
=== PAUSE kuttl/harness/customization
=== RUN kuttl/harness/replication
=== PAUSE kuttl/harness/replication
=== CONT kuttl/harness/basic-deploy
logger.go:42: 07:38:20 | basic-deploy | Skipping creation of user-supplied namespace: swift-kuttl-tests
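Each test is a directory of numbered steps; kuttl applies the step files, runs any commands, and asserts on resource state until the per-step timeout. The swift-operator kuttl-test.yaml itself is not shown in this log; matching the harness output (600s per-step timeout, test dirs overridden by the positional argument), it plausibly carries something like:

  # sketch of kuttl-test.yaml; the actual file contents are an assumption
  apiVersion: kuttl.dev/v1beta1
  kind: TestSuite
  timeout: 600
  testDirs:
  - ./test/kuttl/tests   # overridden here by the positional argument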
logger.go:42: 07:38:20 | basic-deploy/0-deps | starting test step 0-deps
logger.go:42: 07:38:20 | basic-deploy/0-deps | running command: [sh -c if ! [[ `oc get crd openstackdataplanenodesets.dataplane.openstack.org` ]]; then oc apply -f ../../deps/dataplane.openstack.org_openstackdataplanenodesets.yaml fi ]
logger.go:42: 07:38:20 | basic-deploy/0-deps | Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "openstackdataplanenodesets.dataplane.openstack.org" not found
logger.go:42: 07:38:21 | basic-deploy/0-deps | customresourcedefinition.apiextensions.k8s.io/openstackdataplanenodesets.dataplane.openstack.org created
logger.go:42: 07:38:21 | basic-deploy/0-deps | test step completed 0-deps
logger.go:42: 07:38:21 | basic-deploy/1-deploy-swift | starting test step 1-deploy-swift
logger.go:42: 07:38:21 | basic-deploy/1-deploy-swift | running command: [sh -c oc apply -n $NAMESPACE -f ../../../../config/samples/swift_v1beta1_swift.yaml ]
logger.go:42: 07:38:21 | basic-deploy/1-deploy-swift | swift.swift.openstack.org/swift created
logger.go:42: 07:38:21 | basic-deploy/1-deploy-swift | running command: [sh -c $SWIFT_KUTTL_DIR/../common/scripts/check_ring_rebalance_output.sh]
logger.go:42: 07:38:21 | basic-deploy/1-deploy-swift | Error from server (NotFound): jobs.batch "swift-ring-rebalance" not found
logger.go:42: 07:38:21 | basic-deploy/1-deploy-swift | error: error from server (NotFound): jobs.batch "swift-ring-rebalance" not found in namespace "swift-kuttl-tests"
[... the NotFound pair repeats through 07:38:26 while the job is created ...]
logger.go:42: 07:38:27 | basic-deploy/1-deploy-swift | running command: [sh -c $SWIFT_KUTTL_DIR/../common/scripts/check_ring_rebalance_output.sh]
logger.go:42: 07:38:57 | basic-deploy/1-deploy-swift | error: timed out waiting for the condition on jobs/swift-ring-rebalance
logger.go:42: 07:38:57 | basic-deploy/1-deploy-swift | Error from server (BadRequest): container "swift-ring-rebalance" in pod "swift-ring-rebalance-w2wt7" is waiting to start: ContainerCreating
[... the same 30-second cycle repeats, the ring-rebalance container never leaving ContainerCreating ...]
logger.go:42: 07:46:53 | basic-deploy/1-deploy-swift | running command: [sh -c $SWIFT_KUTTL_DIR/../common/scripts/check_ring_rebalance_output.sh]
logger.go:42: 07:47:23 | basic-deploy/1-deploy-swift | error: timed out waiting for the condition on jobs/swift-ring-rebalance
logger.go:42: 07:47:23 | basic-deploy/1-deploy-swift | Error from server (BadRequest): container "swift-ring-rebalance" in pod "swift-ring-rebalance-w2wt7" is waiting to start: ContainerCreating
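check_ring_rebalance_output.sh itself is not shown, but the paired errors above (an `oc wait` style timeout followed by an `oc logs` style BadRequest) suggest it waits for the rebalance job to complete and then reads its output. A minimal sketch under that assumption:

  # sketch of check_ring_rebalance_output.sh, inferred from its error
  # output above; the real script may check the log contents too
  oc wait --for=condition=complete job/swift-ring-rebalance \
      -n "${NAMESPACE}" --timeout=30s
  oc logs -n "${NAMESPACE}" job/swift-ring-rebalance

The container never starting (ContainerCreating for ~10 minutes) points at an image pull or volume problem rather than a ring-building failure.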
logger.go:42: 07:47:24 | basic-deploy/1-deploy-swift | running command: [sh -c $SWIFT_KUTTL_DIR/../common/scripts/check_ring_rebalance_output.sh]
logger.go:42: 07:47:54 | basic-deploy/1-deploy-swift | error: timed out waiting for the condition on jobs/swift-ring-rebalance
logger.go:42: 07:47:54 | basic-deploy/1-deploy-swift | Error from server (BadRequest): container "swift-ring-rebalance" in pod "swift-ring-rebalance-w2wt7" is waiting to start: ContainerCreating
logger.go:42: 07:47:56 | basic-deploy/1-deploy-swift | running command: [sh -c $SWIFT_KUTTL_DIR/../common/scripts/check_ring_rebalance_output.sh]
logger.go:42: 07:48:26 | basic-deploy/1-deploy-swift | error: timed out waiting for the condition on jobs/swift-ring-rebalance
logger.go:42: 07:48:26 | basic-deploy/1-deploy-swift | test step failed 1-deploy-swift
case.go:396: failed in step 1-deploy-swift
case.go:398: --- Swift:swift-kuttl-tests/swift
+++ Swift:swift-kuttl-tests/swift
@@ -1,54 +1,110 @@
 apiVersion: swift.openstack.org/v1beta1
 kind: Swift
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"swift.openstack.org/v1beta1","kind":"Swift","metadata":{"annotations":{},"name":"swift","namespace":"swift-kuttl-tests"},"spec":{"swiftProxy":{"passwordSelectors":{"service":"SwiftPassword"},"replicas":1},"swiftRing":{"ringReplicas":1},"swiftStorage":{"replicas":1,"storageClass":"local-storage"}}}
+  finalizers:
+  - openstack.org/swift
+  managedFields: '[... elided field over 10 lines long ...]'
   name: swift
   namespace: swift-kuttl-tests
 spec:
+  memcachedInstance: memcached
+  ringConfigMaps:
+  - swift-ring-files
+  storageClass: ""
   swiftProxy:
+    apiTimeout: 60
+    auth: {}
+    ceilometerEnabled: false
+    containerImageProxy: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    encryptionEnabled: false
+    memcachedInstance: memcached
+    override: {}
     passwordSelectors:
       service: SwiftPassword
     replicas: 1
+    ringConfigMaps:
+    - swift-ring-files
+    secret: os**********et
+    serviceUser: swift
+    tls:
+      api:
+        internal: {}
+        public: {}
   swiftRing:
+    containerImage: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    enabled: true
+    minPartHours: 1
+    partPower: 10
+    ringConfigMaps:
+    - swift-ring-files
     ringReplicas: 1
+    tls: {}
   swiftStorage:
+    containerImageAccount: quay.io/podified-antelope-centos9/openstack-swift-account:current-podified
+    containerImageContainer: quay.io/podified-antelope-centos9/openstack-swift-container:current-podified
+    containerImageObject: quay.io/podified-antelope-centos9/openstack-swift-object:current-podified
+    containerImageProxy: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    containerSharderEnabled: false
+    memcachedInstance: memcached
     replicas: 1
+    ringConfigMaps:
+    - swift-ring-files
     storageClass: local-storage
+    storageRequest: 10Gi
+    tls: {}
 status:
   conditions:
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:38:26Z"
+    message: Setup started
+    reason: Requested
+    severity: Info
+    status: "False"
     type: Ready
-  - message: ' Memcached instance has been provisioned'
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: ' Memcached instance has been provisioned'
     reason: Ready
     status: "True"
     type: MemcachedReady
-  - message: RoleBinding created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: RoleBinding created
     reason: Ready
     status: "True"
     type: RoleBindingReady
-  - message: Role created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Role created
     reason: Ready
     status: "True"
     type: RoleReady
-  - message: ServiceAccount created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: ServiceAccount created
     reason: Ready
     status: "True"
     type: ServiceAccountReady
-  - message: Service config create completed
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Service config create completed
     reason: Ready
     status: "True"
     type: ServiceConfigReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:38:23Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: SwiftProxyReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:38:26Z"
+    message: Setup started
+    reason: Requested
+    severity: Info
+    status: "False"
    type: SwiftRingReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: SwiftStorageReady
+  observedGeneration: 1
case.go:398: resource Swift:swift-kuttl-tests/swift: .status.conditions.message: value mismatch, expected: Setup complete != actual: Setup started
case.go:398: --- Pod:swift-kuttl-tests/
+++ Pod:swift-kuttl-tests/swift-proxy-7d8cf99555-6vq9r
@@ -1,16 +1,94 @@
 apiVersion: v1
 kind: Pod
 metadata:
+  generateName: swift-proxy-7d8cf99555-
   labels:
     component: swift-proxy
+    pod-template-hash: 7d8cf99555
     service: swift
+  managedFields: '[... elided field over 10 lines long ...]'
+  name: swift-proxy-7d8cf99555-6vq9r
   namespace: swift-kuttl-tests
+  ownerReferences:
+  - apiVersion: apps/v1
+    blockOwnerDeletion: true
+    controller: true
+    kind: ReplicaSet
+    name: swift-proxy-7d8cf99555
+    uid: a76c5c38-2205-42f9-8837-28e0e7c85d5b
+spec: '[... elided field over 10 lines long ...]'
 status:
+  conditions: '[... elided field over 10 lines long ...]'
   containerStatuses:
-  - name: proxy-httpd
-    ready: true
-    started: true
-  - name: proxy-server
-    ready: true
-    started: true
+  - containerID: cri-o://bc550c00403e30ba12df38404f9902b768425c1c4567d628a65fda0a79990d06
+    image: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    imageID: quay.io/podified-antelope-centos9/openstack-swift-proxy-server@sha256:32aab2bf162442b5c6bbb3716fbdb0ec53cb67d6b0e7f018766b29cd8cb8692d
+    lastState:
+      terminated:
+        containerID: cri-o://bc550c00403e30ba12df38404f9902b768425c1c4567d628a65fda0a79990d06
+        exitCode: 0
+        finishedAt: "2026-02-01T07:45:36Z"
+        reason: Completed
+        startedAt: "2026-02-01T07:45:23Z"
+    name: proxy-httpd
+    ready: false
+    restartCount: 7
+    started: false
+    state:
+      waiting:
+        message: back-off 5m0s restarting failed container=proxy-httpd pod=swift-proxy-7d8cf99555-6vq9r_swift-kuttl-tests(8ccb8908-ffc6-4032-8907-da7491bf9304)
+        reason: CrashLoopBackOff
+    volumeMounts: [... /etc/swift, httpd config-data mounts, /run/httpd, /var/log/httpd, serviceaccount token ...]
+  - containerID: cri-o://a4040cacf4e44fe2fba71125e67d7fed8b0dd9e27ff15ee01f56721f2ae8ee2d
+    image: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    imageID: quay.io/podified-antelope-centos9/openstack-swift-proxy-server@sha256:32aab2bf162442b5c6bbb3716fbdb0ec53cb67d6b0e7f018766b29cd8cb8692d
+    lastState:
+      terminated:
+        containerID: cri-o://a4040cacf4e44fe2fba71125e67d7fed8b0dd9e27ff15ee01f56721f2ae8ee2d
+        exitCode: 1
+        finishedAt: "2026-02-01T07:44:11Z"
+        reason: Error
+        startedAt: "2026-02-01T07:44:10Z"
+    name: proxy-server
+    ready: false
+    restartCount: 6
+    started: false
+    state:
+      waiting:
+        message: back-off 5m0s restarting failed container=proxy-server pod=swift-proxy-7d8cf99555-6vq9r_swift-kuttl-tests(8ccb8908-ffc6-4032-8907-da7491bf9304)
+        reason: CrashLoopBackOff
+    volumeMounts: [... /etc/swift, serviceaccount token ...]
+  hostIP: 192.168.126.11
+  hostIPs:
+  - ip: 192.168.126.11
+  phase: Running
+  podIP: 10.217.0.95
+  podIPs:
+  - ip: 10.217.0.95
+  qosClass: BestEffort
+  startTime: "2026-02-01T07:38:23Z"
case.go:398: resource Pod:swift-kuttl-tests/: .status.containerStatuses.ready: value mismatch, expected: true != actual: false
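Both proxy containers are in CrashLoopBackOff: proxy-httpd keeps exiting cleanly (exit 0, likely because its peer dies) while proxy-server exits 1 almost immediately. The usual next step, outside the test run, is to pull the previous container log and the pod events; these are standard oc commands, shown here only as a triage sketch:

  # triage for the CrashLoopBackOff containers reported above
  oc -n swift-kuttl-tests logs swift-proxy-7d8cf99555-6vq9r -c proxy-server --previous
  oc -n swift-kuttl-tests describe pod swift-proxy-7d8cf99555-6vq9r
  oc -n swift-kuttl-tests get events --sort-by=.lastTimestamp | tail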
case.go:398: --- Pod:swift-kuttl-tests/swift-storage-0
+++ Pod:swift-kuttl-tests/swift-storage-0
@@ -1,136 +1,953 @@
 apiVersion: v1
 kind: Pod
 metadata:
+  generateName: swift-storage-
   labels:
+    apps.kubernetes.io/pod-index: "0"
     component: swift-storage
+    controller-revision-hash: swift-storage-56b4f569f5
     service: swift
+    statefulset.kubernetes.io/pod-name: swift-storage-0
+  managedFields: '[... elided field over 10 lines long ...]'
   name: swift-storage-0
   namespace: swift-kuttl-tests
+  ownerReferences:
+  - apiVersion: apps/v1
+    blockOwnerDeletion: true
+    controller: true
+    kind: StatefulSet
+    name: swift-storage
+    uid: f482badd-06d5-42f0-a001-02557b864b50
 spec:
+  affinity: '[... elided field over 10 lines long ...]'
   containers:
   - command:
     - /usr/bin/swift-account-server
     - /etc/swift/account-server.conf.d
     - -v
+    env:
+    - name: CONFIG_HASH
+      value: nfbh686h5b7h5ddh5h546h664h5b7hcbh8dh67ch589h5b9hbfh9bh58bh655h5ffh675hd9h5c8h595h8fh5cbh657h9h5bch598h68h8fh5b6h54fq
+    image: quay.io/podified-antelope-centos9/openstack-swift-account:current-podified
+    imagePullPolicy: IfNotPresent
     name: account-server
+    ports:
+    - containerPort: 6202
+      name: account
+      protocol: TCP
+    resources: {}
+    securityContext:
+      allowPrivilegeEscalation: false
+      capabilities:
+        drop:
+        - ALL
+      runAsNonRoot: true
+      runAsUser: 42445
+    terminationMessagePath: /dev/termination-log
+    terminationMessagePolicy: File
+    volumeMounts: [... /srv/node/pv, /etc/swift, /var/cache/swift, /var/lock, serviceaccount token ...]
[... the same env/image/securityContext/volumeMounts additions repeat for the other fourteen containers: account-replicator, account-auditor, account-reaper, container-server (port 6201), container-replicator, container-auditor, container-updater, object-server (port 6200), object-replicator, object-auditor, object-updater, object-expirer, rsync (port 873), swift-recon-cron ...]
+  dnsPolicy: ClusterFirst
+  enableServiceLinks: true
+  hostname: swift-storage-0
+  imagePullSecrets:
+  - name: swift-swift-dockercfg-hwgzn
+  nodeName: crc
+  preemptionPolicy: PreemptLowerPriority
+  priority: 0
+  restartPolicy: Always
+  schedulerName: default-scheduler
+  securityContext: '[... elided field over 10 lines long ...]'
+  serviceAccount: swift-swift
+  serviceAccountName: swift-swift
+  subdomain: swift-storage
+  terminationGracePeriodSeconds: 30
+  tolerations:
+  - effect: NoExecute
+    key: node.kubernetes.io/not-ready
+    operator: Exists
+    tolerationSeconds: 300
+  - effect: NoExecute
+    key: node.kubernetes.io/unreachable
+    operator: Exists
+    tolerationSeconds: 300
+  volumes: '[... elided field over 10 lines long ...]'
 status:
+  conditions: '[... elided field over 10 lines long ...]'
   containerStatuses:
-  - name: account-auditor
-    ready: true
-    started: true
[... the expected state lists all fifteen containers with ready: true / started: true ...]
[... actual state: account-auditor, account-reaper, account-server, container-auditor, container-server, object-auditor, object-replicator, object-server and rsync are running with ready: true and restartCount: 0; container-updater and object-updater are running again after 3 and 4 restarts (lastState terminated, exitCode: 1); the remaining three are crash-looping, e.g.: ...]
+  - containerID: cri-o://258cbae264fd7af86d488b1e1991bd6d29d7a59f6f1f3730a5482333f2b1614d
+    image: quay.io/podified-antelope-centos9/openstack-swift-account:current-podified
+    imageID: quay.io/podified-antelope-centos9/openstack-swift-account@sha256:44d881639804053fb0ee337aba3a91cac88419b2db798a043bcf2fd1f3a2f70d
+    lastState:
+      terminated:
+        containerID: cri-o://258cbae264fd7af86d488b1e1991bd6d29d7a59f6f1f3730a5482333f2b1614d
+        exitCode: 1
+        finishedAt: "2026-02-01T07:44:28Z"
+        reason: Error
+        startedAt: "2026-02-01T07:44:27Z"
+    name: account-replicator
+    ready: false
+    restartCount: 6
+    started: false
+    state:
+      waiting:
+        message: back-off 5m0s restarting failed container=account-replicator pod=swift-storage-0_swift-kuttl-tests(1edd7394-0f8e-4271-8774-f228946e62f3)
+        reason: CrashLoopBackOff
[... container-replicator (restartCount: 6, exitCode: 1) and object-expirer (restartCount: 6, exitCode: 1) show the same CrashLoopBackOff pattern ...]
+  - containerID: cri-o://c2bb2c50979d81b48db3da8d1503421df516cf45c6cb8eddcab8d29e7b89e40b
+    image: quay.io/podified-antelope-centos9/openstack-swift-object:current-podified
+    imageID: quay.io/podified-antelope-centos9/openstack-swift-object@sha256:b7680a4c80d419864e18bb8e1d8f2cc421a0b1a68ac3ed45293c2cbcfe65535a
+    lastState: {}
+    name: swift-recon-cron
+    ready: true
+    restartCount: 0
+    started: true
+    state:
+      running:
startedAt: "2026-02-01T07:38:38Z" + volumeMounts: + - mountPath: /srv/node/pv + name: swift + - mountPath: /etc/swift + name: etc-swift + - mountPath: /var/cache/swift + name: cache + - mountPath: /var/lock + name: lock + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access-wt6t9 + readOnly: true + recursiveReadOnly: Disabled + hostIP: 192.168.126.11 + hostIPs: + - ip: 192.168.126.11 + phase: Running + podIP: 10.217.0.94 + podIPs: + - ip: 10.217.0.94 + qosClass: BestEffort + startTime: "2026-02-01T07:38:22Z" case.go:398: resource Pod:swift-kuttl-tests/swift-storage-0: .status.containerStatuses.ready: value mismatch, expected: true != actual: false case.go:398: --- Job:swift-kuttl-tests/swift-ring-rebalance +++ Job:swift-kuttl-tests/swift-ring-rebalance @@ -1,10 +1,25 @@ apiVersion: batch/v1 kind: Job metadata: + annotations: + hash: n5bfh595h657h76h649h6bh698h59fh5c8h55bh59fh646h55h6bh4hddh687h8fh5d5h9fhb6h68hch55fh5dfh566hbdh55dh7dh65h5dfh87q labels: job-name: swift-ring-rebalance + managedFields: '[... elided field over 10 lines long ...]' name: swift-ring-rebalance namespace: swift-kuttl-tests + ownerReferences: + - apiVersion: swift.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: SwiftRing + name: swift-ring + uid: 27400b0e-09bd-41ce-be70-03fcafc3e04d +spec: '[... elided field over 10 lines long ...]' status: - succeeded: 1 + active: 1 + ready: 0 + startTime: "2026-02-01T07:38:26Z" + terminating: 0 + uncountedTerminatedPods: {} case.go:398: resource Job:swift-kuttl-tests/swift-ring-rebalance: .status.succeeded: key is missing from map case.go:398: command "$SWIFT_KUTTL_DIR/../common/scripts/check_ring_rebalance_output.sh" exceeded 25 sec timeout, context deadline exceeded logger.go:42: 07:48:26 | basic-deploy | skipping kubernetes event logging === CONT kuttl/harness/customization logger.go:42: 07:48:26 | customization | Ignoring deploy as it does not match file name regexp: ^(\d+)-(?:[^\.]+)(?:\.yaml)?$ 2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/03-test-ceilometer.yaml 2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/04-test-encryption.yaml 2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/08-test-ringsettings.yaml 2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/05-test-containersharder.yaml 2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/06-test-customization.yaml 2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/07-test-custom-swiftconf.yaml logger.go:42: 07:48:26 | customization | Skipping creation of user-supplied namespace: swift-kuttl-tests logger.go:42: 07:48:26 | customization/0-deps | starting test step 0-deps logger.go:42: 07:48:26 | customization/0-deps | running command: [sh -c if ! 
=== CONT kuttl/harness/customization
logger.go:42: 07:48:26 | customization | Ignoring deploy as it does not match file name regexp: ^(\d+)-(?:[^\.]+)(?:\.yaml)?$
2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/03-test-ceilometer.yaml
2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/04-test-encryption.yaml
2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/08-test-ringsettings.yaml
2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/05-test-containersharder.yaml
2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/06-test-customization.yaml
2026/02/01 07:48:26 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/customization/07-test-custom-swiftconf.yaml
logger.go:42: 07:48:26 | customization | Skipping creation of user-supplied namespace: swift-kuttl-tests
logger.go:42: 07:48:26 | customization/0-deps | starting test step 0-deps
logger.go:42: 07:48:26 | customization/0-deps | running command: [sh -c if !
[[ `oc get crd openstackdataplanenodesets.dataplane.openstack.org` ]]; then
oc apply -f ../../deps/dataplane.openstack.org_openstackdataplanenodesets.yaml
fi
]
logger.go:42: 07:48:26 | customization/0-deps | test step completed 0-deps
logger.go:42: 07:48:26 | customization/1-deploy-swift | starting test step 1-deploy-swift
logger.go:42: 07:48:26 | customization/1-deploy-swift | running command: [sh -c oc apply -n $NAMESPACE -f deploy/swift-conf-secrect.yaml
cp ../../../../config/samples/swift_v1beta1_swift.yaml deploy
oc kustomize deploy | oc apply -n $NAMESPACE -f -
]
logger.go:42: 07:48:26 | customization/1-deploy-swift | Warning: resource secrets/swift-conf is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by oc apply. oc apply should only be used on resources created declaratively by either oc create --save-config or oc apply. The missing annotation will be patched automatically.
logger.go:42: 07:48:26 | customization/1-deploy-swift | secret/swift-conf configured
logger.go:42: 07:48:28 | customization/1-deploy-swift | swift.swift.openstack.org/swift configured
logger.go:42: 07:58:29 | customization/1-deploy-swift | test step failed 1-deploy-swift
case.go:396: failed in step 1-deploy-swift
case.go:398: --- Swift:swift-kuttl-tests/swift
+++ Swift:swift-kuttl-tests/swift
@@ -1,44 +1,65 @@
 apiVersion: swift.openstack.org/v1beta1
 kind: Swift
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"swift.openstack.org/v1beta1","kind":"Swift","metadata":{"annotations":{},"name":"swift","namespace":"swift-kuttl-tests"},"spec":{"swiftProxy":{"ceilometerEnabled":true,"defaultConfigOverwrite":{"01-proxy-server.conf":"[DEFAULT]\nworkers = 3\n"},"encryptionEnabled":true,"notificationsBus":{"cluster":"rabbitmq"},"passwordSelectors":{"service":"SwiftPassword"},"replicas":1},"swiftRing":{"minPartHours":2,"partPower":2,"ringReplicas":1},"swiftStorage":{"containerSharderEnabled":true,"defaultConfigOverwrite":{"01-account-server.conf":"[DEFAULT]\nworkers = 3\n","01-container-server.conf":"[DEFAULT]\nworkers = 3\n","01-object-server.conf":"[DEFAULT]\nworkers = 3"},"replicas":1,"storageClass":"local-storage"}}}
+  finalizers:
+  - openstack.org/swift
+  managedFields: '[... elided field over 10 lines long ...]'
   name: swift
   namespace: swift-kuttl-tests
+spec: '[... elided field over 10 lines long ...]'
 status:
   conditions:
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:48:54Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: Ready
-  - message: ' Memcached instance has been provisioned'
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: ' Memcached instance has been provisioned'
     reason: Ready
     status: "True"
     type: MemcachedReady
-  - message: RoleBinding created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: RoleBinding created
     reason: Ready
     status: "True"
     type: RoleBindingReady
-  - message: Role created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Role created
     reason: Ready
     status: "True"
     type: RoleReady
-  - message: ServiceAccount created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: ServiceAccount created
     reason: Ready
     status: "True"
     type: ServiceAccountReady
-  - message: Service config create completed
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Service config create completed
    reason: Ready
     status: "True"
     type: ServiceConfigReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:48:54Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: SwiftProxyReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:38:26Z"
+    message: Setup started
+    reason: Requested
+    severity: Info
+    status: "False"
     type: SwiftRingReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:48:29Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: SwiftStorageReady
+  observedGeneration: 2
case.go:398: resource Swift:swift-kuttl-tests/swift: .status.conditions.message: value mismatch, expected: Setup complete != actual: Deployment in progress
logger.go:42: 07:58:29 | customization | skipping kubernetes event logging
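The customization case times out ten minutes after its apply: Ready, SwiftProxyReady, SwiftRingReady and SwiftStorageReady all stay False with "Deployment in progress" or "Setup started", consistent with the storage pod that was already crash-looping in basic-deploy. When reproducing locally it can be quicker to tail the gating conditions than to wait for the kuttl timeout; a sketch (the resource names match the log, the commands themselves do not appear in it):

# Illustrative only. Print each condition as type/status/message:
oc get swift swift -n swift-kuttl-tests \
  -o jsonpath='{range .status.conditions[*]}{.type}{"\t"}{.status}{"\t"}{.message}{"\n"}{end}'
# Or block until the operator reports Ready, mirroring the kuttl assert:
oc wait swift/swift -n swift-kuttl-tests --for=condition=Ready --timeout=600s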
=== CONT kuttl/harness/replication
logger.go:42: 07:58:29 | replication | Ignoring deploy as it does not match file name regexp: ^(\d+)-(?:[^\.]+)(?:\.yaml)?$
logger.go:42: 07:58:29 | replication | Skipping creation of user-supplied namespace: swift-kuttl-tests
logger.go:42: 07:58:29 | replication/0-deps | starting test step 0-deps
logger.go:42: 07:58:29 | replication/0-deps | running command: [sh -c if !
[[ `oc get crd openstackdataplanenodesets.dataplane.openstack.org` ]]; then
oc apply -f ../../deps/dataplane.openstack.org_openstackdataplanenodesets.yaml
fi
]
logger.go:42: 07:58:29 | replication/0-deps | test step completed 0-deps
logger.go:42: 07:58:29 | replication/1-deploy-swift | starting test step 1-deploy-swift
logger.go:42: 07:58:29 | replication/1-deploy-swift | running command: [sh -c cp ../../../../config/samples/swift_v1beta1_swift.yaml deploy
oc kustomize deploy | oc apply -n $NAMESPACE -f -
]
logger.go:42: 07:58:30 | replication/1-deploy-swift | swift.swift.openstack.org/swift configured
logger.go:42: 08:08:31 | replication/1-deploy-swift | test step failed 1-deploy-swift
case.go:396: failed in step 1-deploy-swift
case.go:398: --- Swift:swift-kuttl-tests/swift
+++ Swift:swift-kuttl-tests/swift
@@ -1,54 +1,109 @@
 apiVersion: swift.openstack.org/v1beta1
 kind: Swift
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"swift.openstack.org/v1beta1","kind":"Swift","metadata":{"annotations":{},"name":"swift","namespace":"swift-kuttl-tests"},"spec":{"swiftProxy":{"passwordSelectors":{"service":"SwiftPassword"},"replicas":1},"swiftRing":{"partPower":2,"ringReplicas":3},"swiftStorage":{"replicas":3,"storageClass":"local-storage"}}}
+  finalizers:
+  - openstack.org/swift
+  managedFields: '[... elided field over 10 lines long ...]'
   name: swift
   namespace: swift-kuttl-tests
 spec:
+  memcachedInstance: memcached
+  ringConfigMaps:
+  - swift-ring-files
+  storageClass: ""
   swiftProxy:
+    apiTimeout: 60
+    auth: {}
+    ceilometerEnabled: false
+    containerImageProxy: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    encryptionEnabled: false
+    memcachedInstance: memcached
+    override: {}
     passwordSelectors:
       service: SwiftPassword
     replicas: 1
+    ringConfigMaps:
+    - swift-ring-files
+    secret: os**********et
+    serviceUser: swift
+    tls:
+      api:
+        internal: {}
+        public: {}
   swiftRing:
+    containerImage: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    enabled: true
+    minPartHours: 1
+    partPower: 2
+    ringConfigMaps:
+    - swift-ring-files
     ringReplicas: 3
+    tls: {}
   swiftStorage:
+    containerImageAccount: quay.io/podified-antelope-centos9/openstack-swift-account:current-podified
+    containerImageContainer: quay.io/podified-antelope-centos9/openstack-swift-container:current-podified
+    containerImageObject: quay.io/podified-antelope-centos9/openstack-swift-object:current-podified
+    containerImageProxy: quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified
+    containerSharderEnabled: false
+    memcachedInstance: memcached
     replicas: 3
+    ringConfigMaps:
+    - swift-ring-files
     storageClass: local-storage
+    storageRequest: 10Gi
+    tls: {}
 status:
   conditions:
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:58:31Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: Ready
-  - message: ' Memcached instance has been provisioned'
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: ' Memcached instance has been provisioned'
     reason: Ready
     status: "True"
     type: MemcachedReady
-  - message: RoleBinding created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: RoleBinding created
     reason: Ready
     status: "True"
     type: RoleBindingReady
-  - message: Role created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Role created
     reason: Ready
     status: "True"
     type: RoleReady
-  - message: ServiceAccount created
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: ServiceAccount created
     reason: Ready
     status: "True"
     type: ServiceAccountReady
-  - message: Service config create completed
+  - lastTransitionTime: "2026-02-01T07:38:21Z"
+    message: Service config create completed
     reason: Ready
     status: "True"
     type: ServiceConfigReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:58:31Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
     type: SwiftProxyReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:58:32Z"
+    message: Setup started
+    reason: Init
+    status: Unknown
     type: SwiftRingReady
-  - message: Setup complete
-    reason: Ready
-    status: "True"
+  - lastTransitionTime: "2026-02-01T07:58:30Z"
+    message: Deployment in progress
+    reason: Requested
+    severity: Info
+    status: "False"
    type: SwiftStorageReady
+  observedGeneration: 3
case.go:398: resource Swift:swift-kuttl-tests/swift: .status.conditions.message: value mismatch, expected: Setup complete != actual: Deployment in progress
logger.go:42: 08:08:31 | replication | skipping kubernetes event logging
=== CONT kuttl/harness/basic-deploy_topology
2026/02/01 08:08:31 object detected with no GVK Kind for path /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator/test/kuttl/tests/basic-deploy_topology/01-assert-deploy-swift.yaml
logger.go:42: 08:08:31 | basic-deploy_topology | Skipping creation of user-supplied namespace: swift-kuttl-tests
logger.go:42: 08:08:31 | basic-deploy_topology/0-deps | starting test step 0-deps
logger.go:42: 08:08:31 | basic-deploy_topology/0-deps | running command: [sh -c if !
[[ `oc get crd openstackdataplanenodesets.dataplane.openstack.org` ]]; then
oc apply -f ../../deps/dataplane.openstack.org_openstackdataplanenodesets.yaml
fi
]
logger.go:42: 08:08:31 | basic-deploy_topology/0-deps | Topology:swift-kuttl-tests/swift-topology created
logger.go:42: 08:08:31 | basic-deploy_topology/0-deps | test step completed 0-deps
logger.go:42: 08:08:31 | basic-deploy_topology/1-deploy-swift | starting test step 1-deploy-swift
logger.go:42: 08:08:31 | basic-deploy_topology/1-deploy-swift | running command: [sh -c oc apply -n $NAMESPACE -f ../../../../config/samples/swift_v1beta1_swift_topology.yaml
]
logger.go:42: 08:08:31 | basic-deploy_topology/1-deploy-swift | The Swift "swift" is invalid: spec.swiftStorage.replicas: Invalid value: 1: SwiftStorage does not support scale-in
case.go:396: failed in step 1-deploy-swift
case.go:398: command "oc apply -n $NAMESPACE -f ../../../../config/samples/swift_v1beta1_..." failed, exit status 1
logger.go:42: 08:08:31 | basic-deploy_topology | skipping kubernetes event logging
=== CONT kuttl/harness/basic-deploy_tls
logger.go:42: 08:08:31 | basic-deploy_tls | Skipping creation of user-supplied namespace: swift-kuttl-tests
logger.go:42: 08:08:31 | basic-deploy_tls/0-deps | starting test step 0-deps
logger.go:42: 08:08:31 | basic-deploy_tls/0-deps | running command: [sh -c if !
[[ `oc get crd openstackdataplanenodesets.dataplane.openstack.org` ]]; then
oc apply -f ../../deps/dataplane.openstack.org_openstackdataplanenodesets.yaml
fi
]
logger.go:42: 08:08:32 | basic-deploy_tls/0-deps | Secret:sw**********le created
logger.go:42: 08:08:32 | basic-deploy_tls/0-deps | Secret:sw**********vc created
logger.go:42: 08:08:32 | basic-deploy_tls/0-deps | Secret:sw**********vc created
logger.go:42: 08:08:32 | basic-deploy_tls/0-deps | test step completed 0-deps
logger.go:42: 08:08:32 | basic-deploy_tls/1-deploy-swift | starting test step 1-deploy-swift
logger.go:42: 08:08:32 | basic-deploy_tls/1-deploy-swift | running command: [sh -c oc apply -n $NAMESPACE -f ../../../../config/samples/swift_v1beta1_swift_tls.yaml
]
logger.go:42: 08:08:32 | basic-deploy_tls/1-deploy-swift | The Swift "swift" is invalid: spec.swiftStorage.replicas: Invalid value: 1: SwiftStorage does not support scale-in
case.go:396: failed in step 1-deploy-swift
case.go:398: command "oc apply -n $NAMESPACE -f ../../../../config/samples/swift_v1beta1_..." failed, exit status 1
logger.go:42: 08:08:32 | basic-deploy_tls | skipping kubernetes event logging
=== NAME kuttl
    harness.go:406: run tests finished
    harness.go:514: cleaning up
    harness.go:571: removing temp folder: ""
--- FAIL: kuttl (1811.73s)
    --- FAIL: kuttl/harness (0.00s)
        --- FAIL: kuttl/harness/basic-deploy (605.57s)
        --- FAIL: kuttl/harness/customization (603.34s)
        --- FAIL: kuttl/harness/replication (601.57s)
        --- FAIL: kuttl/harness/basic-deploy_topology (0.62s)
        --- FAIL: kuttl/harness/basic-deploy_tls (0.61s)
FAIL
make[1]: *** [Makefile:2066: swift_kuttl_run] Error 1
make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'
make: *** [Makefile:2073: swift_kuttl] Error 2
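With all five cases failed, make propagates the error from swift_kuttl_run up to swift_kuttl. When iterating on a fix it is usually faster to re-run a single case than the whole suite; a sketch, assuming the kubectl-kuttl CLI is installed and run from the swift-operator checkout visible in the paths above:

# Illustrative only: re-run just the basic-deploy case against the same cluster.
cd /home/zuul/ci-framework-data/artifacts/manifests/operator/swift-operator
kubectl-kuttl test test/kuttl/tests --test basic-deploy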