[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (edpm_node_ip). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (source_galera_members). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (source_mariadb_ip). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (edpm_node_hostname). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (enable_tlse). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (prelaunch_barbican_secret). Using last defined value only.
Using /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/ansible.cfg as config file

PLAY [Prelude] *****************************************************************

TASK [prelude_local : undefined oc_login_command] ******************************
skipping: [localhost] => {"changed": false, "false_condition": "oc_login_command is not defined", "skip_reason": "Conditional result was False"}

TASK [prelude_local : test for oc CLI presence] ********************************
changed: [localhost] => {"ansible_facts": {"discovered_interpreter_python": "/usr/bin/python3"}, "changed": true, "cmd": "\ncommand -v oc\n", "delta": "0:00:00.005645", "end": "2026-02-27 17:42:46.063334", "failed_when_result": false, "msg": "", "rc": 0, "start": "2026-02-27 17:42:46.057689", "stderr": "", "stderr_lines": [], "stdout": "/home/zuul/bin/oc", "stdout_lines": ["/home/zuul/bin/oc"]}

TASK [prelude_local : oc CLI not found] ****************************************
skipping: [localhost] => {"changed": false, "false_condition": "oc_cli_present_result.rc != 0", "skip_reason": "Conditional result was False"}

TASK [prelude_local : test for install_yamls presence] *************************
ok: [localhost] => {"changed": false, "stat": {"atime": 1772210258.8177016, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 280, "charset": "us-ascii", "checksum": "2f51540dd80a73c9696dba35147f0e442db7788c", "ctime": 1772210104.098671, "dev": 64513, "device_type": 0, "executable": false, "exists": true, "gid": 1000, "gr_name": "zuul", "inode": 138417082, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "text/plain", "mode": "0644", "mtime": 1772209152.7369058, "nlink": 1, "path": "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls//Makefile", "pw_name": "zuul", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 142104, "uid": 1000, "version": "2029898941", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": false}}

TASK [prelude_local : missing install_yamls] ***********************************
skipping: [localhost] => {"changed": false, "false_condition": "not install_yamls_makefile_stat.stat.exists", "skip_reason": "Conditional result was False"}

TASK [prelude_local : clone install_yamls] *************************************
skipping: [localhost] => {"changed": false, "false_condition": "not install_yamls_makefile_stat.stat.exists", "skip_reason": "Conditional result was False"}

TASK [prelude_local : perform oc login] ****************************************
changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc login -u kubeadmin -p 123456789\n", "delta": "0:00:00.402214", "end": "2026-02-27 17:42:47.187996", "msg": "", "rc": 0, "start": "2026-02-27 17:42:46.785782", "stderr": "+ oc login -u kubeadmin -p 123456789", "stderr_lines": ["+ oc login -u kubeadmin -p 123456789"], "stdout": "WARNING: Using insecure TLS client config. Setting this option is not supported!\n\nLogin successful.\n\nYou have access to 72 projects, the list has been suppressed. You can list all projects with 'oc projects'\n\nUsing project \"openstack\".", "stdout_lines": ["WARNING: Using insecure TLS client config. Setting this option is not supported!", "", "Login successful.", "", "You have access to 72 projects, the list has been suppressed. You can list all projects with 'oc projects'", "", "Using project \"openstack\"."]}

TASK [prelude_local : use different namespace for OSPdO adoption] **************
skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src | bool | default(false)", "skip_reason": "Conditional result was False"}

TASK [prelude_local : create namespace] ****************************************
changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\nNAMESPACE=openstack make namespace\n", "delta": "0:00:01.047116", "end": "2026-02-27 17:42:48.552098", "msg": "", "rc": 0, "start": "2026-02-27 17:42:47.504982", "stderr": "+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n+ NAMESPACE=openstack\n+ make namespace\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z openstack ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ cat", "stderr_lines": ["+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/", "+ NAMESPACE=openstack", "+ make namespace", "+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'", "+ '[' -z openstack ']'", "+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'", "+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ cat"], "stdout": "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml\nnamespace/openstack unchanged\ntimeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\nopenstack Active\noc project openstack\nAlready on project \"openstack\" on server \"https://api.crc.testing:6443\".\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "stdout_lines": ["make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "bash scripts/gen-namespace.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml", "namespace/openstack unchanged", "timeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"", "NAME DISPLAY NAME STATUS", "openstack Active", "oc project openstack", "Already on project \"openstack\" on server \"https://api.crc.testing:6443\".", "make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'"]}

TASK [prelude_local : set default namespace to openstack] **********************
changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc project openstack\n", "delta": "0:00:00.159289", "end": "2026-02-27 17:42:48.959411", "msg": "", "rc": 0, "start": "2026-02-27 17:42:48.800122", "stderr": "+ oc project openstack", "stderr_lines": ["+ oc project openstack"], "stdout": "Already on project \"openstack\" on server \"https://api.crc.testing:6443\".", "stdout_lines": ["Already on project \"openstack\" on server \"https://api.crc.testing:6443\"."]}

PLAY [Cleanup] *****************************************************************

TASK [pcp_cleanup : clean up any remains of podified deployment] ***************
changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n# Cleanup OpenStackControlPlane object\noc delete --ignore-not-found=true OpenStackControlPlane --all || true\n\n# Ensure that all pods in openstack namespace are deleted\nwhile oc get pod | grep -E 'rabbitmq-server-0|openstack-galera-0'; do\n sleep 2;\ndone\n\n# Cleanup OpenStackDataplane objects\noc delete --ignore-not-found=true OpenStackDataPlaneDeployment --all || true\noc delete --ignore-not-found=true OpenStackDataPlaneNodeSet --all || true\noc delete --ignore-not-found=true OpenStackDataPlaneService --all || true\n\n# Delete Adoption helper pods\noc delete --ignore-not-found=true --wait=false pod mariadb-copy-data\noc delete --ignore-not-found=true --wait=false pvc mariadb-data\noc delete --ignore-not-found=true --wait=false pod ovn-copy-data\n\n# Delete secrets\nfor secret in $(oc get secrets -o name); do\n echo \"Deleting secret ${secret}\";\n # (TODO: holser) The following 'oc patch' command removes finalizers from secrets to allow deletion.\n # This is a workaround for an issue where secrets may be stuck in a terminating state due to finalizers.\n # Once OSPRH-10262 is merged and the issue is resolved, this patch can be removed.\n oc patch ${secret} -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n oc delete ${secret} ;\ndone\n\n# Make pvs available if they are released\noc get pv -o json | jq -r '.items[] | select(.status.phase == \"Released\") | .metadata.name' | xargs -I{} oc patch pv {} --type='merge' -p '{\"spec\":{\"claimRef\": null}}'\n\n# Delete IT certificates\noc delete --ignore-not-found issuer rootca-internal\noc delete --ignore-not-found secret rootca-internal\n\noc delete subscription cluster-observability-operator -n openshift-operators --ignore-not-found\n", "delta": "0:00:18.893221", "end": "2026-02-27 17:43:08.176196", "msg": "",
"rc": 0, "start": "2026-02-27 17:42:49.282975", "stderr": "+ oc delete --ignore-not-found=true OpenStackControlPlane --all\n+ oc get pod\n+ grep -E 'rabbitmq-server-0|openstack-galera-0'\nNo resources found in openstack namespace.\n+ oc delete --ignore-not-found=true OpenStackDataPlaneDeployment --all\n+ oc delete --ignore-not-found=true OpenStackDataPlaneNodeSet --all\n+ oc delete --ignore-not-found=true OpenStackDataPlaneService --all\n+ oc delete --ignore-not-found=true --wait=false pod mariadb-copy-data\n+ oc delete --ignore-not-found=true --wait=false pvc mariadb-data\n+ oc delete --ignore-not-found=true --wait=false pod ovn-copy-data\n++ oc get secrets -o name\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/builder-dockercfg-l7wsg'\n+ oc patch secret/builder-dockercfg-l7wsg -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/builder-dockercfg-l7wsg\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-barbican-internal-svc'\n+ oc patch secret/cert-barbican-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-barbican-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-barbican-public-route'\n+ oc patch secret/cert-barbican-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-barbican-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-barbican-public-svc'\n+ oc patch secret/cert-barbican-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-barbican-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ceilometer-internal-svc'\n+ oc patch secret/cert-ceilometer-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ceilometer-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-cinder-internal-svc'\n+ oc patch secret/cert-cinder-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-cinder-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-cinder-public-route'\n+ oc patch secret/cert-cinder-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-cinder-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-cinder-public-svc'\n+ oc patch secret/cert-cinder-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-cinder-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-galera-openstack-cell1-svc'\n+ oc patch secret/cert-galera-openstack-cell1-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-galera-openstack-cell1-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-galera-openstack-svc'\n+ oc patch secret/cert-galera-openstack-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-galera-openstack-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-glance-default-internal-svc'\n+ oc patch secret/cert-glance-default-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-glance-default-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-glance-default-public-route'\n+ oc patch 
secret/cert-glance-default-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-glance-default-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-glance-default-public-svc'\n+ oc patch secret/cert-glance-default-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-glance-default-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-keystone-internal-svc'\n+ oc patch secret/cert-keystone-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-keystone-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-keystone-public-route'\n+ oc patch secret/cert-keystone-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-keystone-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-keystone-public-svc'\n+ oc patch secret/cert-keystone-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-keystone-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-kube-state-metrics-svc'\n+ oc patch secret/cert-kube-state-metrics-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-kube-state-metrics-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-memcached-svc'\n+ oc patch secret/cert-memcached-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-memcached-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-internal-svc'\n+ oc patch secret/cert-neutron-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-ovndbs'\n+ oc patch secret/cert-neutron-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-public-route'\n+ oc patch secret/cert-neutron-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-public-svc'\n+ oc patch secret/cert-neutron-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-internal-svc'\n+ oc patch secret/cert-nova-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-metadata-internal-svc'\n+ oc patch secret/cert-nova-metadata-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-metadata-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-public-route'\n+ oc patch secret/cert-nova-novncproxy-cell1-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-novncproxy-cell1-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-public-svc'\n+ oc patch secret/cert-nova-novncproxy-cell1-public-svc -p 
'{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-novncproxy-cell1-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-vencrypt'\n+ oc patch secret/cert-nova-novncproxy-cell1-vencrypt -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-novncproxy-cell1-vencrypt\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-public-route'\n+ oc patch secret/cert-nova-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-public-svc'\n+ oc patch secret/cert-nova-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovn-metrics'\n+ oc patch secret/cert-ovn-metrics -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovn-metrics\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovncontroller-ovndbs'\n+ oc patch secret/cert-ovncontroller-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovncontroller-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovndbcluster-nb-ovndbs'\n+ oc patch secret/cert-ovndbcluster-nb-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovndbcluster-nb-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovndbcluster-sb-ovndbs'\n+ oc patch secret/cert-ovndbcluster-sb-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovndbcluster-sb-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovnnorthd-ovndbs'\n+ oc patch secret/cert-ovnnorthd-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovnnorthd-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-placement-internal-svc'\n+ oc patch secret/cert-placement-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-placement-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-placement-public-route'\n+ oc patch secret/cert-placement-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-placement-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-placement-public-svc'\n+ oc patch secret/cert-placement-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-placement-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-rabbitmq-cell1-svc'\n+ oc patch secret/cert-rabbitmq-cell1-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-rabbitmq-cell1-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-rabbitmq-svc'\n+ oc patch secret/cert-rabbitmq-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-rabbitmq-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-swift-internal-svc'\n+ oc patch secret/cert-swift-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-swift-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ 
echo 'Deleting secret secret/cert-swift-public-route'\n+ oc patch secret/cert-swift-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-swift-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-swift-public-svc'\n+ oc patch secret/cert-swift-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-swift-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/combined-ca-bundle'\n+ oc patch secret/combined-ca-bundle -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/combined-ca-bundle\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/default-dockercfg-g2c9q'\n+ oc patch secret/default-dockercfg-g2c9q -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/default-dockercfg-g2c9q\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/deployer-dockercfg-wrr6t'\n+ oc patch secret/deployer-dockercfg-wrr6t -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/deployer-dockercfg-wrr6t\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/libvirt-secret'\n+ oc patch secret/libvirt-secret -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/libvirt-secret\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/octavia-ca-passphrase'\n+ oc patch secret/octavia-ca-passphrase -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/octavia-ca-passphrase\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/osp-secret'\n+ oc patch secret/osp-secret -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/osp-secret\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-internal'\n+ oc patch secret/rootca-internal -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-internal\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-libvirt'\n+ oc patch secret/rootca-libvirt -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-libvirt\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-ovn'\n+ oc patch secret/rootca-ovn -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-ovn\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-public'\n+ oc patch secret/rootca-public -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-public\n+ oc get pv -o json\n+ jq -r '.items[] | select(.status.phase == \"Released\") | .metadata.name'\n+ xargs '-I{}' oc patch pv '{}' --type=merge -p '{\"spec\":{\"claimRef\": null}}'\n+ oc delete --ignore-not-found issuer rootca-internal\n+ oc delete --ignore-not-found secret rootca-internal\n+ oc delete subscription cluster-observability-operator -n openshift-operators --ignore-not-found", "stdout": "No resources found\nNo resources found\nNo resources found\nNo resources found\nDeleting secret secret/builder-dockercfg-l7wsg\nsecret/builder-dockercfg-l7wsg patched (no change)\nsecret \"builder-dockercfg-l7wsg\" deleted from openstack namespace\nDeleting secret secret/cert-barbican-internal-svc\nsecret/cert-barbican-internal-svc patched (no change)\nsecret \"cert-barbican-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-barbican-public-route\nsecret/cert-barbican-public-route patched (no change)\nsecret \"cert-barbican-public-route\" deleted from openstack namespace\nDeleting secret 
secret/cert-barbican-public-svc\nsecret/cert-barbican-public-svc patched (no change)\nsecret \"cert-barbican-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-ceilometer-internal-svc\nsecret/cert-ceilometer-internal-svc patched (no change)\nsecret \"cert-ceilometer-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-cinder-internal-svc\nsecret/cert-cinder-internal-svc patched (no change)\nsecret \"cert-cinder-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-cinder-public-route\nsecret/cert-cinder-public-route patched (no change)\nsecret \"cert-cinder-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-cinder-public-svc\nsecret/cert-cinder-public-svc patched (no change)\nsecret \"cert-cinder-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-galera-openstack-cell1-svc\nsecret/cert-galera-openstack-cell1-svc patched (no change)\nsecret \"cert-galera-openstack-cell1-svc\" deleted from openstack namespace\nDeleting secret secret/cert-galera-openstack-svc\nsecret/cert-galera-openstack-svc patched (no change)\nsecret \"cert-galera-openstack-svc\" deleted from openstack namespace\nDeleting secret secret/cert-glance-default-internal-svc\nsecret/cert-glance-default-internal-svc patched (no change)\nsecret \"cert-glance-default-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-glance-default-public-route\nsecret/cert-glance-default-public-route patched (no change)\nsecret \"cert-glance-default-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-glance-default-public-svc\nsecret/cert-glance-default-public-svc patched (no change)\nsecret \"cert-glance-default-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-keystone-internal-svc\nsecret/cert-keystone-internal-svc patched (no change)\nsecret \"cert-keystone-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-keystone-public-route\nsecret/cert-keystone-public-route patched (no change)\nsecret \"cert-keystone-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-keystone-public-svc\nsecret/cert-keystone-public-svc patched (no change)\nsecret \"cert-keystone-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-kube-state-metrics-svc\nsecret/cert-kube-state-metrics-svc patched (no change)\nsecret \"cert-kube-state-metrics-svc\" deleted from openstack namespace\nDeleting secret secret/cert-memcached-svc\nsecret/cert-memcached-svc patched (no change)\nsecret \"cert-memcached-svc\" deleted from openstack namespace\nDeleting secret secret/cert-neutron-internal-svc\nsecret/cert-neutron-internal-svc patched (no change)\nsecret \"cert-neutron-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-neutron-ovndbs\nsecret/cert-neutron-ovndbs patched (no change)\nsecret \"cert-neutron-ovndbs\" deleted from openstack namespace\nDeleting secret secret/cert-neutron-public-route\nsecret/cert-neutron-public-route patched (no change)\nsecret \"cert-neutron-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-neutron-public-svc\nsecret/cert-neutron-public-svc patched (no change)\nsecret \"cert-neutron-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-nova-internal-svc\nsecret/cert-nova-internal-svc patched (no change)\nsecret \"cert-nova-internal-svc\" deleted from openstack namespace\nDeleting secret 
secret/cert-nova-metadata-internal-svc\nsecret/cert-nova-metadata-internal-svc patched (no change)\nsecret \"cert-nova-metadata-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-nova-novncproxy-cell1-public-route\nsecret/cert-nova-novncproxy-cell1-public-route patched (no change)\nsecret \"cert-nova-novncproxy-cell1-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-nova-novncproxy-cell1-public-svc\nsecret/cert-nova-novncproxy-cell1-public-svc patched (no change)\nsecret \"cert-nova-novncproxy-cell1-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-nova-novncproxy-cell1-vencrypt\nsecret/cert-nova-novncproxy-cell1-vencrypt patched (no change)\nsecret \"cert-nova-novncproxy-cell1-vencrypt\" deleted from openstack namespace\nDeleting secret secret/cert-nova-public-route\nsecret/cert-nova-public-route patched (no change)\nsecret \"cert-nova-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-nova-public-svc\nsecret/cert-nova-public-svc patched (no change)\nsecret \"cert-nova-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-ovn-metrics\nsecret/cert-ovn-metrics patched (no change)\nsecret \"cert-ovn-metrics\" deleted from openstack namespace\nDeleting secret secret/cert-ovncontroller-ovndbs\nsecret/cert-ovncontroller-ovndbs patched (no change)\nsecret \"cert-ovncontroller-ovndbs\" deleted from openstack namespace\nDeleting secret secret/cert-ovndbcluster-nb-ovndbs\nsecret/cert-ovndbcluster-nb-ovndbs patched (no change)\nsecret \"cert-ovndbcluster-nb-ovndbs\" deleted from openstack namespace\nDeleting secret secret/cert-ovndbcluster-sb-ovndbs\nsecret/cert-ovndbcluster-sb-ovndbs patched (no change)\nsecret \"cert-ovndbcluster-sb-ovndbs\" deleted from openstack namespace\nDeleting secret secret/cert-ovnnorthd-ovndbs\nsecret/cert-ovnnorthd-ovndbs patched (no change)\nsecret \"cert-ovnnorthd-ovndbs\" deleted from openstack namespace\nDeleting secret secret/cert-placement-internal-svc\nsecret/cert-placement-internal-svc patched (no change)\nsecret \"cert-placement-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-placement-public-route\nsecret/cert-placement-public-route patched (no change)\nsecret \"cert-placement-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-placement-public-svc\nsecret/cert-placement-public-svc patched (no change)\nsecret \"cert-placement-public-svc\" deleted from openstack namespace\nDeleting secret secret/cert-rabbitmq-cell1-svc\nsecret/cert-rabbitmq-cell1-svc patched (no change)\nsecret \"cert-rabbitmq-cell1-svc\" deleted from openstack namespace\nDeleting secret secret/cert-rabbitmq-svc\nsecret/cert-rabbitmq-svc patched (no change)\nsecret \"cert-rabbitmq-svc\" deleted from openstack namespace\nDeleting secret secret/cert-swift-internal-svc\nsecret/cert-swift-internal-svc patched (no change)\nsecret \"cert-swift-internal-svc\" deleted from openstack namespace\nDeleting secret secret/cert-swift-public-route\nsecret/cert-swift-public-route patched (no change)\nsecret \"cert-swift-public-route\" deleted from openstack namespace\nDeleting secret secret/cert-swift-public-svc\nsecret/cert-swift-public-svc patched (no change)\nsecret \"cert-swift-public-svc\" deleted from openstack namespace\nDeleting secret secret/combined-ca-bundle\nsecret/combined-ca-bundle patched (no change)\nsecret \"combined-ca-bundle\" deleted from openstack namespace\nDeleting secret 
secret/default-dockercfg-g2c9q\nsecret/default-dockercfg-g2c9q patched (no change)\nsecret \"default-dockercfg-g2c9q\" deleted from openstack namespace\nDeleting secret secret/deployer-dockercfg-wrr6t\nsecret/deployer-dockercfg-wrr6t patched (no change)\nsecret \"deployer-dockercfg-wrr6t\" deleted from openstack namespace\nDeleting secret secret/libvirt-secret\nsecret/libvirt-secret patched (no change)\nsecret \"libvirt-secret\" deleted from openstack namespace\nDeleting secret secret/octavia-ca-passphrase\nsecret/octavia-ca-passphrase patched (no change)\nsecret \"octavia-ca-passphrase\" deleted from openstack namespace\nDeleting secret secret/osp-secret\nsecret/osp-secret patched (no change)\nsecret \"osp-secret\" deleted from openstack namespace\nDeleting secret secret/rootca-internal\nsecret/rootca-internal patched (no change)\nsecret \"rootca-internal\" deleted from openstack namespace\nDeleting secret secret/rootca-libvirt\nsecret/rootca-libvirt patched (no change)\nsecret \"rootca-libvirt\" deleted from openstack namespace\nDeleting secret secret/rootca-ovn\nsecret/rootca-ovn patched (no change)\nsecret \"rootca-ovn\" deleted from openstack namespace\nDeleting secret secret/rootca-public\nsecret/rootca-public patched (no change)\nsecret \"rootca-public\" deleted from openstack namespace\npersistentvolume/local-storage01-crc patched\npersistentvolume/local-storage07-crc patched"}

TASK [pcp_cleanup : revert standalone VM to snapshotted state] *****************
skipping: [localhost] => {"changed": false, "false_condition": "standalone_revert_enabled|bool", "skip_reason": "Conditional result was False"}

TASK [pcp_cleanup : reset CRC storage] *****************************************
changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n# Try up to 3 times to clean up and recreate CRC storage, with a 5-second delay between attempts\nfor i in {1..3}; do\n make crc_storage_cleanup crc_storage && break || sleep 5\ndone\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\nNAMESPACE=openstack make namespace\nfor CELL in $(echo $RENAMED_CELLS); do\n oc delete pvc mysql-db-openstack-$CELL-galera-0 --ignore-not-found=true\n oc delete pvc persistence-rabbitmq-$CELL-server-0 --ignore-not-found=true\ndone\n", "delta": "0:00:21.279488", "end": "2026-02-27 17:43:29.735722", "msg": "", "rc": 0, "start": "2026-02-27 17:43:08.456234", "stderr": "+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n+ for i in {1..3}\n+ make crc_storage_cleanup crc_storage\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z crc-storage ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage\n+ cat\n++ oc get pv --selector provisioned-by=crc-devsetup --no-headers\n++ grep Bound\n++ awk '{print $6}'\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/mysql-db-openstack-cell1-galera-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/mysql-db-openstack-cell1-galera-0\n++ cut -d / -f 2\n+ NAME=mysql-db-openstack-cell1-galera-0\n+ oc delete -n openstack pvc/mysql-db-openstack-cell1-galera-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/swift-swift-storage-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/swift-swift-storage-0\n++ cut -d / -f 2\n+ NAME=swift-swift-storage-0\n+ oc delete -n openstack pvc/swift-swift-storage-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0\n++ cut -d / -f 2\n+ NAME=ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0\n+ oc delete -n openstack pvc/ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/mysql-db-openstack-galera-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/mysql-db-openstack-galera-0\n++ cut -d / -f 2\n+ NAME=mysql-db-openstack-galera-0\n+ oc delete -n openstack pvc/mysql-db-openstack-galera-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo 
openstack/ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0\n++ cut -d / -f 2\n+ NAME=ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0\n+ oc delete -n openstack pvc/ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/glance-glance-default-external-api-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/glance-glance-default-external-api-0\n++ cut -d / -f 2\n+ NAME=glance-glance-default-external-api-0\n+ oc delete -n openstack pvc/glance-glance-default-external-api-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/glance-glance-default-internal-api-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/glance-glance-default-internal-api-0\n++ cut -d / -f 2\n+ NAME=glance-glance-default-internal-api-0\n+ oc delete -n openstack pvc/glance-glance-default-internal-api-0 --ignore-not-found\n++ oc get pv --selector provisioned-by=crc-devsetup --no-headers\n++ awk '{print $1}'\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage01-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage02-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage03-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage04-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage05-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage06-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage07-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage08-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage09-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage10-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage11-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage12-crc\n+++ dirname scripts/delete-pv.sh\n++ cd scripts\n++ pwd -P\n+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts\n+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_common.sh\n++ set -ex\n++ OPERATION=create\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n+ PV_NUM=12\n+ TIMEOUT=500s\n++ oc get node -o template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}' -l node-role.kubernetes.io/worker\n+ NODE_NAMES=crc\n+ '[' -z crc ']'\n+ for node in $NODE_NAMES\n+ . 
/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_apply.sh crc delete\n++ set -ex\n++ NODE=crc\n++ OPERATION=delete\n++ oc delete -n crc-storage job crc-storage-crc --ignore-not-found\n++ cat\n++ oc apply -f -\nWarning: would violate PodSecurity \"restricted:latest\": privileged (container \"storage\" must not set securityContext.privileged=true), allowPrivilegeEscalation != false (container \"storage\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"storage\" must set securityContext.capabilities.drop=[\"ALL\"]), restricted volume types (volume \"node-mnt\" uses restricted volume type \"hostPath\"), runAsNonRoot != true (pod or container \"storage\" must set securityContext.runAsNonRoot=true), runAsUser=0 (pod and container \"storage\" must not set runAsUser=0)\n+ oc wait job -n crc-storage -l install-yamls.crc.storage --for condition=Complete --timeout 500s\n+++ dirname scripts/create-pv.sh\n++ cd scripts\n++ pwd -P\n+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts\n+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_common.sh\n++ set -ex\n++ OPERATION=create\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n+ PV_NUM=12\n+ TIMEOUT=500s\n++ jq -r '.items[] | select(.status.phase | test(\"Released\")).metadata.name'\n++ oc get pv -o json\n+ released=\n++ oc get node -o template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}' -l node-role.kubernetes.io/worker\n+ NODE_NAMES=crc\n+ '[' -z crc ']'\n+ for node in $NODE_NAMES\n+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_apply.sh crc create\n++ set -ex\n++ NODE=crc\n++ OPERATION=create\n++ oc delete -n crc-storage job crc-storage-crc --ignore-not-found\n++ cat\n++ oc apply -f -\nWarning: would violate PodSecurity \"restricted:latest\": privileged (container \"storage\" must not set securityContext.privileged=true), allowPrivilegeEscalation != false (container \"storage\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"storage\" must set securityContext.capabilities.drop=[\"ALL\"]), restricted volume types (volume \"node-mnt\" uses restricted volume type \"hostPath\"), runAsNonRoot != true (pod or container \"storage\" must set securityContext.runAsNonRoot=true), runAsUser=0 (pod and container \"storage\" must not set runAsUser=0)\n+ oc wait job -n crc-storage -l install-yamls.crc.storage --for condition=Complete --timeout 500s\n+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out\n+ '[' -z '\"local-storage\"' ']'\n+ '[' '!' 
-d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc\n+ PV_NUM=12\n+ STORAGE_CAPACITY=10\n++ oc get node -o name -l node-role.kubernetes.io/worker\n++ sed -e 's|node/||'\n++ head -c-1\n++ tr '\\n' ' '\n+ NODE_NAMES=crc\n+ '[' -z crc ']'\n+ cat\n+ for node in $NODE_NAMES\n++ seq -w 12\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ cat\n+ break\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ NAMESPACE=openstack\n+ make namespace\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z openstack ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'\n+ cat\n++ echo cell1\n+ for CELL in $(echo $RENAMED_CELLS)\n+ oc delete pvc mysql-db-openstack-cell1-galera-0 --ignore-not-found=true\n+ oc delete pvc persistence-rabbitmq-cell1-server-0 --ignore-not-found=true", "stdout": "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage/namespace.yaml\nnamespace/crc-storage unchanged\ntimeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io crc-storage); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\ncrc-storage Active\noc project crc-storage\nNow using project \"crc-storage\" on server \"https://api.crc.testing:6443\".\nbash scripts/cleanup-crc-pv.sh\npersistentvolumeclaim \"mysql-db-openstack-cell1-galera-0\" deleted from openstack namespace\npersistentvolumeclaim \"swift-swift-storage-0\" deleted from openstack namespace\npersistentvolumeclaim \"ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0\" deleted from openstack namespace\npersistentvolumeclaim \"mysql-db-openstack-galera-0\" deleted from openstack namespace\npersistentvolumeclaim \"ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0\" deleted from openstack namespace\npersistentvolumeclaim \"glance-glance-default-external-api-0\" deleted from openstack namespace\npersistentvolumeclaim \"glance-glance-default-internal-api-0\" deleted from openstack namespace\npersistentvolume \"local-storage01-crc\" deleted\npersistentvolume \"local-storage02-crc\" deleted\npersistentvolume \"local-storage03-crc\" deleted\npersistentvolume \"local-storage04-crc\" deleted\npersistentvolume \"local-storage05-crc\" deleted\npersistentvolume \"local-storage06-crc\" deleted\npersistentvolume \"local-storage07-crc\" deleted\npersistentvolume \"local-storage08-crc\" deleted\npersistentvolume \"local-storage09-crc\" deleted\npersistentvolume \"local-storage10-crc\" deleted\npersistentvolume \"local-storage11-crc\" deleted\npersistentvolume \"local-storage12-crc\" deleted\nif oc get sc \"local-storage\"; then oc delete sc \"local-storage\"; fi\nNAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE\nlocal-storage kubernetes.io/no-provisioner Delete WaitForFirstConsumer true 64m\nstorageclass.storage.k8s.io \"local-storage\" deleted\nbash scripts/delete-pv.sh\nconfigmap/crc-storage unchanged\nserviceaccount/crc-storage unchanged\nrole.rbac.authorization.k8s.io/crc-storage-role unchanged\nrolebinding.rbac.authorization.k8s.io/crc-storage-rolebinding unchanged\njob.batch \"crc-storage-crc\" deleted from crc-storage namespace\njob.batch/crc-storage-crc created\njob.batch/crc-storage-crc condition met\nbash scripts/create-pv.sh\nconfigmap/crc-storage unchanged\nserviceaccount/crc-storage unchanged\nrole.rbac.authorization.k8s.io/crc-storage-role unchanged\nrolebinding.rbac.authorization.k8s.io/crc-storage-rolebinding unchanged\njob.batch \"crc-storage-crc\" deleted from crc-storage namespace\njob.batch/crc-storage-crc created\njob.batch/crc-storage-crc condition met\nbash scripts/gen-crc-pv-kustomize.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc/storage.yaml\nstorageclass.storage.k8s.io/local-storage created\npersistentvolume/local-storage01-crc created\npersistentvolume/local-storage02-crc created\npersistentvolume/local-storage03-crc created\npersistentvolume/local-storage04-crc 
created\npersistentvolume/local-storage05-crc created\npersistentvolume/local-storage06-crc created\npersistentvolume/local-storage07-crc created\npersistentvolume/local-storage08-crc created\npersistentvolume/local-storage09-crc created\npersistentvolume/local-storage10-crc created\npersistentvolume/local-storage11-crc created\npersistentvolume/local-storage12-crc created\npersistentvolumeclaim/ansible-ee-logs unchanged\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nmake[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml\nnamespace/openstack unchanged\ntimeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\nopenstack Active\noc project openstack\nNow using project \"openstack\" on server \"https://api.crc.testing:6443\".\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'"} PLAY [Adoption] **************************************************************** TASK [development_environment : pre-launch test VM instance] ******************* changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nexport OPENSTACK_COMMAND=\"ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack\"\nexport PING_COMMAND=\"ping \"\nexport EDPM_CONFIGURE_HUGEPAGES=false\nexport CINDER_VOLUME_BACKEND_CONFIGURED=true\nexport CINDER_BACKUP_BACKEND_CONFIGURED=true\nexport PING_TEST_VM=false\nset -e\n\nalias openstack=\"$OPENSTACK_COMMAND\"\nalias ping_cmd=\"$PING_COMMAND\"\n\nfunction wait_for_status() {\n local time=0\n local msg=\"Waiting for $2\"\n local status=\"${3:-available}\"\n local result\n while [ $time -le 30 ] ; do\n result=$(${BASH_ALIASES[openstack]} $1 -f json)\n echo $result | jq -r \".status\" | grep -q $status && break\n echo \"result=$result\"\n echo \"$msg\"\n time=$(( time + 5 ))\n sleep 5\n done\n}\n\nfunction create_volume_resources() {\n # create a data volume\n if ! ${BASH_ALIASES[openstack]} volume show disk ; then\n ${BASH_ALIASES[openstack]} volume create --image cirros --size 1 disk\n wait_for_status \"volume show disk\" \"test volume 'disk' creation\"\n fi\n\n # create volume snapshot\n if ! 
${BASH_ALIASES[openstack]} volume snapshot show snapshot ; then\n ${BASH_ALIASES[openstack]} volume snapshot create --volume disk snapshot\n wait_for_status \"volume snapshot show snapshot\" \"test volume 'disk' snapshot availability\"\n fi\n\n # Add volume to the test VM\n if ${BASH_ALIASES[openstack]} volume show disk -f json | jq -r '.status' | grep -q available ; then\n ${BASH_ALIASES[openstack]} server add volume test disk\n fi\n}\n\nfunction create_backup_resources() {\n # create volume backup\n if ! ${BASH_ALIASES[openstack]} volume backup show backup; then\n ${BASH_ALIASES[openstack]} volume backup create --name backup disk --force\n wait_for_status \"volume backup show backup\" \"test volume 'disk' backup completion\"\n fi\n}\n\nfunction create_bfv_volume() {\n # Launch an instance from boot-volume (BFV)\n if ! ${BASH_ALIASES[openstack]} volume show boot-volume ; then\n ${BASH_ALIASES[openstack]} volume create --image cirros --size 1 boot-volume\n wait_for_status \"volume show boot-volume\" \"test volume 'boot-volume' creation\"\n fi\n if ${BASH_ALIASES[openstack]} volume show boot-volume -f json | jq -r '.status' | grep -q available ; then\n ${BASH_ALIASES[openstack]} server create --flavor m1.small --volume boot-volume --nic net-id=private bfv-server --wait\n fi\n}\n\n# Create Image\nIMG=cirros-0.6.3-x86_64-disk.img\nURL=http://download.cirros-cloud.net/0.6.3/$IMG\nDISK_FORMAT=qcow2\nRAW=$IMG\ncurl -L -# $URL > /tmp/$IMG\nif type qemu-img >/dev/null 2>&1; then\n RAW=$(echo $IMG | sed s/img/raw/g)\n qemu-img convert -f qcow2 -O raw /tmp/$IMG /tmp/$RAW\n DISK_FORMAT=raw\nfi\n${BASH_ALIASES[openstack]} image show cirros || \\\n ${BASH_ALIASES[openstack]} image create --container-format bare --disk-format $DISK_FORMAT cirros < /tmp/$RAW\n\n# Create flavor\n${BASH_ALIASES[openstack]} flavor show m1.small || \\\n ${BASH_ALIASES[openstack]} flavor create --ram 512 --vcpus 1 --disk 1 --ephemeral 1 m1.small\nif [ \"${EDPM_CONFIGURE_HUGEPAGES:-false}\" = \"true\" ] ; then\n ${BASH_ALIASES[openstack]} flavor set m1.small --property hw:mem_page_size=2MB\nfi\n\n# Create networks\n${BASH_ALIASES[openstack]} network show private || ${BASH_ALIASES[openstack]} network create private --share\n${BASH_ALIASES[openstack]} subnet show priv_sub || ${BASH_ALIASES[openstack]} subnet create priv_sub --subnet-range 192.168.0.0/24 --network private\n${BASH_ALIASES[openstack]} network show public || ${BASH_ALIASES[openstack]} network create public --external --provider-network-type flat --provider-physical-network datacentre\n${BASH_ALIASES[openstack]} subnet show public_subnet || \\\n ${BASH_ALIASES[openstack]} subnet create public_subnet --subnet-range 192.168.122.0/24 --allocation-pool start=192.168.122.171,end=192.168.122.250 --gateway 192.168.122.1 --dhcp --network public\n${BASH_ALIASES[openstack]} router show priv_router || {\n ${BASH_ALIASES[openstack]} router create priv_router\n ${BASH_ALIASES[openstack]} router add subnet priv_router priv_sub\n ${BASH_ALIASES[openstack]} router set priv_router --external-gateway public\n}\n\n# Create a floating IP\n${BASH_ALIASES[openstack]} floating ip show 192.168.122.20 || \\\n ${BASH_ALIASES[openstack]} floating ip create public --floating-ip-address 192.168.122.20\n\n# Create a test instance\n${BASH_ALIASES[openstack]} server show test || {\n ${BASH_ALIASES[openstack]} server create --flavor m1.small --image cirros --nic net-id=private test --wait\n ${BASH_ALIASES[openstack]} server add floating ip test 192.168.122.20\n}\n\nif [ \"$PING_TEST_VM\" = \"true\" 
]; then\n # Create a floating IP\n ${BASH_ALIASES[openstack]} floating ip show 192.168.122.21 || \\\n ${BASH_ALIASES[openstack]} floating ip create public --floating-ip-address 192.168.122.21\n\n # Create a test-ping instance\n ${BASH_ALIASES[openstack]} server show test-ping || {\n ${BASH_ALIASES[openstack]} server create --flavor m1.small --image cirros --nic net-id=private test-ping --wait\n ${BASH_ALIASES[openstack]} server add floating ip test-ping 192.168.122.21\n }\nfi\n\n# Create security groups\n\n${BASH_ALIASES[openstack]} security group show adoption-test || {\n ${BASH_ALIASES[openstack]} security group create adoption-test\n ${BASH_ALIASES[openstack]} security group rule create --protocol icmp --ingress --icmp-type -1 adoption-test\n ${BASH_ALIASES[openstack]} security group rule create --protocol tcp --ingress --dst-port 22 adoption-test\n}\n\n# Add security groups\n\n${BASH_ALIASES[openstack]} server add security group test adoption-test || true\nif [ \"$PING_TEST_VM\" = \"true\" ]; then\n ${BASH_ALIASES[openstack]} server add security group test-ping adoption-test || true\nfi\n\nexport FIP=192.168.122.20\n# check connectivity via FIP\nTRIES=0\nuntil ${BASH_ALIASES[ping_cmd]} -D -c1 -W2 \"$FIP\"; do\n ((TRIES++)) || true\n if [ \"$TRIES\" -gt 20 ]; then\n echo \"Ping timeout\"\n exit 1\n fi\ndone\n\nif [ \"$CINDER_VOLUME_BACKEND_CONFIGURED\" = \"true\" ]; then\n create_volume_resources\n create_bfv_volume\nfi\n\nif [ \"$CINDER_BACKUP_BACKEND_CONFIGURED\" = \"true\" ]; then\n create_backup_resources\nfi\n", "delta": "0:02:54.086238", "end": "2026-02-27 17:46:24.289330", "msg": "", "rc": 0, "start": "2026-02-27 17:43:30.203092", "stderr": "+ export 'OPENSTACK_COMMAND=ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'\n+ OPENSTACK_COMMAND='ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'\n+ export 'PING_COMMAND=ping '\n+ PING_COMMAND='ping '\n+ export EDPM_CONFIGURE_HUGEPAGES=false\n+ EDPM_CONFIGURE_HUGEPAGES=false\n+ export CINDER_VOLUME_BACKEND_CONFIGURED=true\n+ CINDER_VOLUME_BACKEND_CONFIGURED=true\n+ export CINDER_BACKUP_BACKEND_CONFIGURED=true\n+ CINDER_BACKUP_BACKEND_CONFIGURED=true\n+ export PING_TEST_VM=false\n+ PING_TEST_VM=false\n+ set -e\n+ alias 'openstack=ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'\n+ alias 'ping_cmd=ping '\n+ IMG=cirros-0.6.3-x86_64-disk.img\n+ URL=http://download.cirros-cloud.net/0.6.3/cirros-0.6.3-x86_64-disk.img\n+ DISK_FORMAT=qcow2\n+ RAW=cirros-0.6.3-x86_64-disk.img\n+ curl -L -# http://download.cirros-cloud.net/0.6.3/cirros-0.6.3-x86_64-disk.img\n######################################################################## 100.0%\n+ type qemu-img\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack image show cirros\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo 
Image found for cirros\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack image create --container-format bare --disk-format qcow2 cirros\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack flavor show m1.small\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo Flavor found for m1.small\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack flavor create --ram 512 --vcpus 1 --disk 1 --ephemeral 1 m1.small\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ '[' false = true ']'\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network show private\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nError while executing command: No Network found for private\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network create private --share\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet show priv_sub\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo Subnet found for priv_sub\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet create priv_sub --subnet-range 192.168.0.0/24 --network private\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network show public\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known 
hosts.\r\nError while executing command: No Network found for public\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network create public --external --provider-network-type flat --provider-physical-network datacentre\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet show public_subnet\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo Subnet found for public_subnet\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet create public_subnet --subnet-range 192.168.122.0/24 --allocation-pool start=192.168.122.171,end=192.168.122.250 --gateway 192.168.122.1 --dhcp --network public\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router show priv_router\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo Router found for priv_router\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router create priv_router\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router add subnet priv_router priv_sub\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router set priv_router --external-gateway public\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack floating ip show 192.168.122.20\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa 
not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nError while executing command: No FloatingIP found for 192.168.122.20\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack floating ip create public --floating-ip-address 192.168.122.20\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server show test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo server with a name or ID of 'test' exists.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server create --flavor m1.small --image cirros --nic net-id=private test --wait\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server add floating ip test 192.168.122.20\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ '[' false = true ']'\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group show adoption-test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nError while executing command: No SecurityGroup found for adoption-test\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group create adoption-test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule create --protocol icmp --ingress --icmp-type -1 adoption-test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 
OS_CLOUD=standalone openstack security group rule create --protocol tcp --ingress --dst-port 22 adoption-test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server add security group test adoption-test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ '[' false = true ']'\n+ export FIP=192.168.122.20\n+ FIP=192.168.122.20\n+ TRIES=0\n+ ping -D -c1 -W2 192.168.122.20\n+ '[' true = true ']'\n+ create_volume_resources\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show disk\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo volume with a name or ID of 'disk' exists.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume create --image cirros --size 1 disk\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ wait_for_status 'volume show disk' 'test volume '\\''disk'\\'' creation'\n+ local time=0\n+ local 'msg=Waiting for test volume '\\''disk'\\'' creation'\n+ local status=available\n+ local result\n+ '[' 0 -le 30 ']'\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show disk -f json\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ result='{\n \"attachments\": [],\n \"availability_zone\": \"nova\",\n \"bootable\": \"false\",\n \"consistencygroup_id\": null,\n \"created_at\": \"2026-02-27T17:45:17.000000\",\n \"description\": null,\n \"encrypted\": false,\n \"id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",\n \"migration_status\": null,\n \"multiattach\": false,\n \"name\": \"disk\",\n \"os-vol-host-attr:host\": \"hostgroup@tripleo_ceph#tripleo_ceph\",\n \"os-vol-mig-status-attr:migstat\": null,\n \"os-vol-mig-status-attr:name_id\": null,\n \"os-vol-tenant-attr:tenant_id\": \"8c8307df41034feea2d7e66d9c38ae14\",\n \"properties\": {},\n \"replication_status\": null,\n \"size\": 1,\n \"snapshot_id\": null,\n \"source_volid\": null,\n \"status\": \"creating\",\n \"type\": \"tripleo\",\n \"updated_at\": \"2026-02-27T17:45:17.000000\",\n \"user_id\": \"93d9b9d6929c41ee8d62569442846100\",\n \"volume_image_metadata\": {\n \"signature_verified\": \"False\"\n }\n}'\n+ echo '{' '\"attachments\":' '[],' '\"availability_zone\":' '\"nova\",' '\"bootable\":' '\"false\",' '\"consistencygroup_id\":' null, '\"created_at\":' 
'\"2026-02-27T17:45:17.000000\",' '\"description\":' null, '\"encrypted\":' false, '\"id\":' '\"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",' '\"migration_status\":' null, '\"multiattach\":' false, '\"name\":' '\"disk\",' '\"os-vol-host-attr:host\":' '\"hostgroup@tripleo_ceph#tripleo_ceph\",' '\"os-vol-mig-status-attr:migstat\":' null, '\"os-vol-mig-status-attr:name_id\":' null, '\"os-vol-tenant-attr:tenant_id\":' '\"8c8307df41034feea2d7e66d9c38ae14\",' '\"properties\":' '{},' '\"replication_status\":' null, '\"size\":' 1, '\"snapshot_id\":' null, '\"source_volid\":' null, '\"status\":' '\"creating\",' '\"type\":' '\"tripleo\",' '\"updated_at\":' '\"2026-02-27T17:45:17.000000\",' '\"user_id\":' '\"93d9b9d6929c41ee8d62569442846100\",' '\"volume_image_metadata\":' '{' '\"signature_verified\":' '\"False\"' '}' '}'\n+ jq -r .status\n+ grep -q available\n+ echo 'result={\n \"attachments\": [],\n \"availability_zone\": \"nova\",\n \"bootable\": \"false\",\n \"consistencygroup_id\": null,\n \"created_at\": \"2026-02-27T17:45:17.000000\",\n \"description\": null,\n \"encrypted\": false,\n \"id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",\n \"migration_status\": null,\n \"multiattach\": false,\n \"name\": \"disk\",\n \"os-vol-host-attr:host\": \"hostgroup@tripleo_ceph#tripleo_ceph\",\n \"os-vol-mig-status-attr:migstat\": null,\n \"os-vol-mig-status-attr:name_id\": null,\n \"os-vol-tenant-attr:tenant_id\": \"8c8307df41034feea2d7e66d9c38ae14\",\n \"properties\": {},\n \"replication_status\": null,\n \"size\": 1,\n \"snapshot_id\": null,\n \"source_volid\": null,\n \"status\": \"creating\",\n \"type\": \"tripleo\",\n \"updated_at\": \"2026-02-27T17:45:17.000000\",\n \"user_id\": \"93d9b9d6929c41ee8d62569442846100\",\n \"volume_image_metadata\": {\n \"signature_verified\": \"False\"\n }\n}'\n+ echo 'Waiting for test volume '\\''disk'\\'' creation'\n+ time=5\n+ sleep 5\n+ '[' 5 -le 30 ']'\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show disk -f json\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ result='{\n \"attachments\": [],\n \"availability_zone\": \"nova\",\n \"bootable\": \"true\",\n \"consistencygroup_id\": null,\n \"created_at\": \"2026-02-27T17:45:17.000000\",\n \"description\": null,\n \"encrypted\": false,\n \"id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",\n \"migration_status\": null,\n \"multiattach\": false,\n \"name\": \"disk\",\n \"os-vol-host-attr:host\": \"hostgroup@tripleo_ceph#tripleo_ceph\",\n \"os-vol-mig-status-attr:migstat\": null,\n \"os-vol-mig-status-attr:name_id\": null,\n \"os-vol-tenant-attr:tenant_id\": \"8c8307df41034feea2d7e66d9c38ae14\",\n \"properties\": {},\n \"replication_status\": null,\n \"size\": 1,\n \"snapshot_id\": null,\n \"source_volid\": null,\n \"status\": \"available\",\n \"type\": \"tripleo\",\n \"updated_at\": \"2026-02-27T17:45:24.000000\",\n \"user_id\": \"93d9b9d6929c41ee8d62569442846100\",\n \"volume_image_metadata\": {\n \"signature_verified\": \"False\",\n \"owner_specified.openstack.md5\": \"\",\n \"owner_specified.openstack.sha256\": \"\",\n \"owner_specified.openstack.object\": \"images/cirros\",\n \"image_id\": \"515db511-7de7-465a-bc9d-1db3f2238101\",\n \"image_name\": \"cirros\",\n \"checksum\": \"87617e24a5e30cb3b87fda8c0764838f\",\n 
\"container_format\": \"bare\",\n \"disk_format\": \"qcow2\",\n \"min_disk\": \"0\",\n \"min_ram\": \"0\",\n \"size\": \"21692416\"\n }\n}'\n+ echo '{' '\"attachments\":' '[],' '\"availability_zone\":' '\"nova\",' '\"bootable\":' '\"true\",' '\"consistencygroup_id\":' null, '\"created_at\":' '\"2026-02-27T17:45:17.000000\",' '\"description\":' null, '\"encrypted\":' false, '\"id\":' '\"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",' '\"migration_status\":' null, '\"multiattach\":' false, '\"name\":' '\"disk\",' '\"os-vol-host-attr:host\":' '\"hostgroup@tripleo_ceph#tripleo_ceph\",' '\"os-vol-mig-status-attr:migstat\":' null, '\"os-vol-mig-status-attr:name_id\":' null, '\"os-vol-tenant-attr:tenant_id\":' '\"8c8307df41034feea2d7e66d9c38ae14\",' '\"properties\":' '{},' '\"replication_status\":' null, '\"size\":' 1, '\"snapshot_id\":' null, '\"source_volid\":' null, '\"status\":' '\"available\",' '\"type\":' '\"tripleo\",' '\"updated_at\":' '\"2026-02-27T17:45:24.000000\",' '\"user_id\":' '\"93d9b9d6929c41ee8d62569442846100\",' '\"volume_image_metadata\":' '{' '\"signature_verified\":' '\"False\",' '\"owner_specified.openstack.md5\":' '\"\",' '\"owner_specified.openstack.sha256\":' '\"\",' '\"owner_specified.openstack.object\":' '\"images/cirros\",' '\"image_id\":' '\"515db511-7de7-465a-bc9d-1db3f2238101\",' '\"image_name\":' '\"cirros\",' '\"checksum\":' '\"87617e24a5e30cb3b87fda8c0764838f\",' '\"container_format\":' '\"bare\",' '\"disk_format\":' '\"qcow2\",' '\"min_disk\":' '\"0\",' '\"min_ram\":' '\"0\",' '\"size\":' '\"21692416\"' '}' '}'\n+ jq -r .status\n+ grep -q available\n+ break\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume snapshot show snapshot\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo snapshot with a name or ID of 'snapshot' exists.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume snapshot create --volume disk snapshot\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ wait_for_status 'volume snapshot show snapshot' 'test volume '\\''disk'\\'' snapshot availability'\n+ local time=0\n+ local 'msg=Waiting for test volume '\\''disk'\\'' snapshot availability'\n+ local status=available\n+ local result\n+ '[' 0 -le 30 ']'\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume snapshot show snapshot -f json\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ result='{\n \"created_at\": \"2026-02-27T17:45:34.000000\",\n \"description\": null,\n \"id\": \"0bd1f9bc-dcf0-44f0-9849-8beaf13e0ed3\",\n \"name\": \"snapshot\",\n \"os-extended-snapshot-attributes:progress\": \"100%\",\n \"os-extended-snapshot-attributes:project_id\": \"8c8307df41034feea2d7e66d9c38ae14\",\n \"properties\": {},\n \"size\": 1,\n \"status\": 
\"available\",\n \"updated_at\": \"2026-02-27T17:45:35.000000\",\n \"volume_id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"\n}'\n+ echo '{' '\"created_at\":' '\"2026-02-27T17:45:34.000000\",' '\"description\":' null, '\"id\":' '\"0bd1f9bc-dcf0-44f0-9849-8beaf13e0ed3\",' '\"name\":' '\"snapshot\",' '\"os-extended-snapshot-attributes:progress\":' '\"100%\",' '\"os-extended-snapshot-attributes:project_id\":' '\"8c8307df41034feea2d7e66d9c38ae14\",' '\"properties\":' '{},' '\"size\":' 1, '\"status\":' '\"available\",' '\"updated_at\":' '\"2026-02-27T17:45:35.000000\",' '\"volume_id\":' '\"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"' '}'\n+ jq -r .status\n+ grep -q available\n+ break\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show disk -f json\n+ jq -r .status\n+ grep -q available\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server add volume test disk\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ create_bfv_volume\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show boot-volume\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo volume with a name or ID of 'boot-volume' exists.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume create --image cirros --size 1 boot-volume\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ wait_for_status 'volume show boot-volume' 'test volume '\\''boot-volume'\\'' creation'\n+ local time=0\n+ local 'msg=Waiting for test volume '\\''boot-volume'\\'' creation'\n+ local status=available\n+ local result\n+ '[' 0 -le 30 ']'\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show boot-volume -f json\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ result='{\n \"attachments\": [],\n \"availability_zone\": \"nova\",\n \"bootable\": \"true\",\n \"consistencygroup_id\": null,\n \"created_at\": \"2026-02-27T17:45:48.000000\",\n \"description\": null,\n \"encrypted\": false,\n \"id\": \"3fedaf47-cb2b-4b6e-afc6-3380205fe9b4\",\n \"migration_status\": null,\n \"multiattach\": false,\n \"name\": \"boot-volume\",\n \"os-vol-host-attr:host\": \"hostgroup@tripleo_ceph#tripleo_ceph\",\n 
\"os-vol-mig-status-attr:migstat\": null,\n \"os-vol-mig-status-attr:name_id\": null,\n \"os-vol-tenant-attr:tenant_id\": \"8c8307df41034feea2d7e66d9c38ae14\",\n \"properties\": {},\n \"replication_status\": null,\n \"size\": 1,\n \"snapshot_id\": null,\n \"source_volid\": null,\n \"status\": \"available\",\n \"type\": \"tripleo\",\n \"updated_at\": \"2026-02-27T17:45:50.000000\",\n \"user_id\": \"93d9b9d6929c41ee8d62569442846100\",\n \"volume_image_metadata\": {\n \"signature_verified\": \"False\",\n \"owner_specified.openstack.md5\": \"\",\n \"owner_specified.openstack.sha256\": \"\",\n \"owner_specified.openstack.object\": \"images/cirros\",\n \"image_id\": \"515db511-7de7-465a-bc9d-1db3f2238101\",\n \"image_name\": \"cirros\",\n \"checksum\": \"87617e24a5e30cb3b87fda8c0764838f\",\n \"container_format\": \"bare\",\n \"disk_format\": \"qcow2\",\n \"min_disk\": \"0\",\n \"min_ram\": \"0\",\n \"size\": \"21692416\"\n }\n}'\n+ echo '{' '\"attachments\":' '[],' '\"availability_zone\":' '\"nova\",' '\"bootable\":' '\"true\",' '\"consistencygroup_id\":' null, '\"created_at\":' '\"2026-02-27T17:45:48.000000\",' '\"description\":' null, '\"encrypted\":' false, '\"id\":' '\"3fedaf47-cb2b-4b6e-afc6-3380205fe9b4\",' '\"migration_status\":' null, '\"multiattach\":' false, '\"name\":' '\"boot-volume\",' '\"os-vol-host-attr:host\":' '\"hostgroup@tripleo_ceph#tripleo_ceph\",' '\"os-vol-mig-status-attr:migstat\":' null, '\"os-vol-mig-status-attr:name_id\":' null, '\"os-vol-tenant-attr:tenant_id\":' '\"8c8307df41034feea2d7e66d9c38ae14\",' '\"properties\":' '{},' '\"replication_status\":' null, '\"size\":' 1, '\"snapshot_id\":' null, '\"source_volid\":' null, '\"status\":' '\"available\",' '\"type\":' '\"tripleo\",' '\"updated_at\":' '\"2026-02-27T17:45:50.000000\",' '\"user_id\":' '\"93d9b9d6929c41ee8d62569442846100\",' '\"volume_image_metadata\":' '{' '\"signature_verified\":' '\"False\",' '\"owner_specified.openstack.md5\":' '\"\",' '\"owner_specified.openstack.sha256\":' '\"\",' '\"owner_specified.openstack.object\":' '\"images/cirros\",' '\"image_id\":' '\"515db511-7de7-465a-bc9d-1db3f2238101\",' '\"image_name\":' '\"cirros\",' '\"checksum\":' '\"87617e24a5e30cb3b87fda8c0764838f\",' '\"container_format\":' '\"bare\",' '\"disk_format\":' '\"qcow2\",' '\"min_disk\":' '\"0\",' '\"min_ram\":' '\"0\",' '\"size\":' '\"21692416\"' '}' '}'\n+ jq -r .status\n+ grep -q available\n+ break\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume show boot-volume -f json\n+ grep -q available\n+ jq -r .status\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server create --flavor m1.small --volume boot-volume --nic net-id=private bfv-server --wait\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ '[' true = true ']'\n+ create_backup_resources\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone 
openstack volume backup show backup\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\nNo volumebackup with a name or ID of 'backup' exists.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume backup create --name backup disk --force\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ wait_for_status 'volume backup show backup' 'test volume '\\''disk'\\'' backup completion'\n+ local time=0\n+ local 'msg=Waiting for test volume '\\''disk'\\'' backup completion'\n+ local status=available\n+ local result\n+ '[' 0 -le 30 ']'\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume backup show backup -f json\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ result='{\n \"availability_zone\": null,\n \"container\": \"backups\",\n \"created_at\": \"2026-02-27T17:46:13.000000\",\n \"data_timestamp\": \"2026-02-27T17:46:13.000000\",\n \"description\": null,\n \"fail_reason\": null,\n \"has_dependent_backups\": false,\n \"id\": \"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",\n \"is_incremental\": false,\n \"name\": \"backup\",\n \"object_count\": 0,\n \"size\": 1,\n \"snapshot_id\": null,\n \"status\": \"creating\",\n \"updated_at\": \"2026-02-27T17:46:15.000000\",\n \"volume_id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"\n}'\n+ echo '{' '\"availability_zone\":' null, '\"container\":' '\"backups\",' '\"created_at\":' '\"2026-02-27T17:46:13.000000\",' '\"data_timestamp\":' '\"2026-02-27T17:46:13.000000\",' '\"description\":' null, '\"fail_reason\":' null, '\"has_dependent_backups\":' false, '\"id\":' '\"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",' '\"is_incremental\":' false, '\"name\":' '\"backup\",' '\"object_count\":' 0, '\"size\":' 1, '\"snapshot_id\":' null, '\"status\":' '\"creating\",' '\"updated_at\":' '\"2026-02-27T17:46:15.000000\",' '\"volume_id\":' '\"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"' '}'\n+ jq -r .status\n+ grep -q available\n+ echo 'result={\n \"availability_zone\": null,\n \"container\": \"backups\",\n \"created_at\": \"2026-02-27T17:46:13.000000\",\n \"data_timestamp\": \"2026-02-27T17:46:13.000000\",\n \"description\": null,\n \"fail_reason\": null,\n \"has_dependent_backups\": false,\n \"id\": \"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",\n \"is_incremental\": false,\n \"name\": \"backup\",\n \"object_count\": 0,\n \"size\": 1,\n \"snapshot_id\": null,\n \"status\": \"creating\",\n \"updated_at\": \"2026-02-27T17:46:15.000000\",\n \"volume_id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"\n}'\n+ echo 'Waiting for test volume '\\''disk'\\'' backup completion'\n+ time=5\n+ sleep 5\n+ '[' 5 -le 30 ']'\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack volume backup show backup -f json\nWarning: Identity file 
/home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.\r\n+ result='{\n \"availability_zone\": null,\n \"container\": \"backups\",\n \"created_at\": \"2026-02-27T17:46:13.000000\",\n \"data_timestamp\": \"2026-02-27T17:46:13.000000\",\n \"description\": null,\n \"fail_reason\": null,\n \"has_dependent_backups\": false,\n \"id\": \"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",\n \"is_incremental\": false,\n \"name\": \"backup\",\n \"object_count\": 0,\n \"size\": 1,\n \"snapshot_id\": null,\n \"status\": \"available\",\n \"updated_at\": \"2026-02-27T17:46:19.000000\",\n \"volume_id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"\n}'\n+ echo '{' '\"availability_zone\":' null, '\"container\":' '\"backups\",' '\"created_at\":' '\"2026-02-27T17:46:13.000000\",' '\"data_timestamp\":' '\"2026-02-27T17:46:13.000000\",' '\"description\":' null, '\"fail_reason\":' null, '\"has_dependent_backups\":' false, '\"id\":' '\"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",' '\"is_incremental\":' false, '\"name\":' '\"backup\",' '\"object_count\":' 0, '\"size\":' 1, '\"snapshot_id\":' null, '\"status\":' '\"available\",' '\"updated_at\":' '\"2026-02-27T17:46:19.000000\",' '\"volume_id\":' '\"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"' '}'\n+ grep -q available\n+ jq -r .status\n+ break", "stdout": "+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+\n| Field | Value |\n+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+\n| container_format | bare |\n| created_at | 2026-02-27T17:43:36Z |\n| disk_format | qcow2 |\n| file | /v2/images/515db511-7de7-465a-bc9d-1db3f2238101/file |\n| id | 515db511-7de7-465a-bc9d-1db3f2238101 |\n| min_disk | 0 |\n| min_ram | 0 |\n| name | cirros |\n| owner | 8c8307df41034feea2d7e66d9c38ae14 |\n| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' |\n| protected | False |\n| schema | /v2/schemas/image |\n| status | queued |\n| tags | |\n| updated_at | 2026-02-27T17:43:36Z |\n| visibility | shared |\n+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+\n+----------------------------+--------------------------------------+\n| Field | Value |\n+----------------------------+--------------------------------------+\n| OS-FLV-DISABLED:disabled | False |\n| OS-FLV-EXT-DATA:ephemeral | 1 |\n| description | None |\n| disk | 1 |\n| id | 0410c4f3-80fc-472f-99b2-45042a836694 |\n| name | m1.small |\n| os-flavor-access:is_public | True |\n| properties | |\n| ram | 512 |\n| rxtx_factor | 1.0 |\n| swap | |\n| vcpus | 1 
|\n+----------------------------+--------------------------------------+\n+---------------------------+--------------------------------------+\n| Field | Value |\n+---------------------------+--------------------------------------+\n| admin_state_up | UP |\n| availability_zone_hints | |\n| availability_zones | |\n| created_at | 2026-02-27T17:43:49Z |\n| description | |\n| dns_domain | |\n| id | 8884680d-17a4-4358-8041-a381c5bfe181 |\n| ipv4_address_scope | None |\n| ipv6_address_scope | None |\n| is_default | False |\n| is_vlan_transparent | None |\n| mtu | 1442 |\n| name | private |\n| port_security_enabled | True |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| provider:network_type | geneve |\n| provider:physical_network | None |\n| provider:segmentation_id | 55261 |\n| qos_policy_id | None |\n| revision_number | 1 |\n| router:external | Internal |\n| segments | None |\n| shared | True |\n| status | ACTIVE |\n| subnets | |\n| tags | |\n| updated_at | 2026-02-27T17:43:49Z |\n+---------------------------+--------------------------------------+\n+----------------------+--------------------------------------+\n| Field | Value |\n+----------------------+--------------------------------------+\n| allocation_pools | 192.168.0.2-192.168.0.254 |\n| cidr | 192.168.0.0/24 |\n| created_at | 2026-02-27T17:43:55Z |\n| description | |\n| dns_nameservers | |\n| dns_publish_fixed_ip | None |\n| enable_dhcp | True |\n| gateway_ip | 192.168.0.1 |\n| host_routes | |\n| id | 7943a7a8-9a44-4360-8699-65460a3838c9 |\n| ip_version | 4 |\n| ipv6_address_mode | None |\n| ipv6_ra_mode | None |\n| name | priv_sub |\n| network_id | 8884680d-17a4-4358-8041-a381c5bfe181 |\n| prefix_length | None |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| revision_number | 0 |\n| segment_id | None |\n| service_types | |\n| subnetpool_id | None |\n| tags | |\n| updated_at | 2026-02-27T17:43:55Z |\n+----------------------+--------------------------------------+\n+---------------------------+--------------------------------------+\n| Field | Value |\n+---------------------------+--------------------------------------+\n| admin_state_up | UP |\n| availability_zone_hints | |\n| availability_zones | |\n| created_at | 2026-02-27T17:44:01Z |\n| description | |\n| dns_domain | |\n| id | e728ec87-c8bf-41f8-993a-6e48da3a50b5 |\n| ipv4_address_scope | None |\n| ipv6_address_scope | None |\n| is_default | False |\n| is_vlan_transparent | None |\n| mtu | 1500 |\n| name | public |\n| port_security_enabled | True |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| provider:network_type | flat |\n| provider:physical_network | datacentre |\n| provider:segmentation_id | None |\n| qos_policy_id | None |\n| revision_number | 1 |\n| router:external | External |\n| segments | None |\n| shared | False |\n| status | ACTIVE |\n| subnets | |\n| tags | |\n| updated_at | 2026-02-27T17:44:01Z |\n+---------------------------+--------------------------------------+\n+----------------------+--------------------------------------+\n| Field | Value |\n+----------------------+--------------------------------------+\n| allocation_pools | 192.168.122.171-192.168.122.250 |\n| cidr | 192.168.122.0/24 |\n| created_at | 2026-02-27T17:44:07Z |\n| description | |\n| dns_nameservers | |\n| dns_publish_fixed_ip | None |\n| enable_dhcp | True |\n| gateway_ip | 192.168.122.1 |\n| host_routes | |\n| id | 22719ca2-db19-434d-9506-7e1ff13977a5 |\n| ip_version | 4 |\n| ipv6_address_mode | None |\n| ipv6_ra_mode | None |\n| name | public_subnet |\n| network_id | 
e728ec87-c8bf-41f8-993a-6e48da3a50b5 |\n| prefix_length | None |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| revision_number | 0 |\n| segment_id | None |\n| service_types | |\n| subnetpool_id | None |\n| tags | |\n| updated_at | 2026-02-27T17:44:07Z |\n+----------------------+--------------------------------------+\n+-------------------------+--------------------------------------+\n| Field | Value |\n+-------------------------+--------------------------------------+\n| admin_state_up | UP |\n| availability_zone_hints | |\n| availability_zones | |\n| created_at | 2026-02-27T17:44:13Z |\n| description | |\n| external_gateway_info | null |\n| flavor_id | None |\n| id | 7fb94fe0-0361-4353-b4f1-0fc0f5d3e6af |\n| name | priv_router |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| revision_number | 1 |\n| routes | |\n| status | ACTIVE |\n| tags | |\n| updated_at | 2026-02-27T17:44:13Z |\n+-------------------------+--------------------------------------+\n+---------------------+--------------------------------------+\n| Field | Value |\n+---------------------+--------------------------------------+\n| created_at | 2026-02-27T17:44:29Z |\n| description | |\n| dns_domain | |\n| dns_name | |\n| fixed_ip_address | None |\n| floating_ip_address | 192.168.122.20 |\n| floating_network_id | e728ec87-c8bf-41f8-993a-6e48da3a50b5 |\n| id | 6566a5b4-d6e3-4aec-98b2-2c7e2672833d |\n| name | 192.168.122.20 |\n| port_details | None |\n| port_id | None |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| qos_policy_id | None |\n| revision_number | 0 |\n| router_id | None |\n| status | DOWN |\n| subnet_id | None |\n| tags | [] |\n| updated_at | 2026-02-27T17:44:29Z |\n+---------------------+--------------------------------------+\n\n+-------------------------------------+----------------------------------------------------------+\n| Field | Value |\n+-------------------------------------+----------------------------------------------------------+\n| OS-DCF:diskConfig | MANUAL |\n| OS-EXT-AZ:availability_zone | nova |\n| OS-EXT-SRV-ATTR:host | standalone.localdomain |\n| OS-EXT-SRV-ATTR:hypervisor_hostname | standalone.localdomain |\n| OS-EXT-SRV-ATTR:instance_name | instance-00000001 |\n| OS-EXT-STS:power_state | Running |\n| OS-EXT-STS:task_state | None |\n| OS-EXT-STS:vm_state | active |\n| OS-SRV-USG:launched_at | 2026-02-27T17:44:47.000000 |\n| OS-SRV-USG:terminated_at | None |\n| accessIPv4 | |\n| accessIPv6 | |\n| addresses | private=192.168.0.235 |\n| adminPass | SvBFgYsVqYs5 |\n| config_drive | |\n| created | 2026-02-27T17:44:36Z |\n| flavor | m1.small (0410c4f3-80fc-472f-99b2-45042a836694) |\n| hostId | 03cdec8b180f092f35ff754dec8182fa1408246759e54ffc36742686 |\n| id | 272bf86a-2d41-484a-8168-6a8575429dbc |\n| image | cirros (515db511-7de7-465a-bc9d-1db3f2238101) |\n| key_name | None |\n| name | test |\n| progress | 0 |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| properties | |\n| security_groups | name='default' |\n| status | ACTIVE |\n| updated | 2026-02-27T17:44:47Z |\n| user_id | 93d9b9d6929c41ee8d62569442846100 |\n| volumes_attached | |\n+-------------------------------------+----------------------------------------------------------+\n+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| Field | Value 
|\n+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n| created_at | 2026-02-27T17:45:02Z |\n| description | adoption-test |\n| id | d6a668b2-7395-4930-9267-e74f87fe100e |\n| name | adoption-test |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| revision_number | 1 |\n| rules | created_at='2026-02-27T17:45:02Z', direction='egress', ethertype='IPv6', id='64465c08-6bfc-4605-abd0-30cb8471c249', standard_attr_id='48', updated_at='2026-02-27T17:45:02Z' |\n| | created_at='2026-02-27T17:45:02Z', direction='egress', ethertype='IPv4', id='74c7e5d8-458f-4d3c-9e06-64c1fa49c4d5', standard_attr_id='47', updated_at='2026-02-27T17:45:02Z' |\n| stateful | True |\n| tags | [] |\n| updated_at | 2026-02-27T17:45:02Z |\n+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\n+-------------------------+--------------------------------------+\n| Field | Value |\n+-------------------------+--------------------------------------+\n| created_at | 2026-02-27T17:45:05Z |\n| description | |\n| direction | ingress |\n| ether_type | IPv4 |\n| id | 8b59b789-799b-43c3-a175-d9414b27277c |\n| name | None |\n| port_range_max | None |\n| port_range_min | None |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| protocol | icmp |\n| remote_address_group_id | None |\n| remote_group_id | None |\n| remote_ip_prefix | 0.0.0.0/0 |\n| revision_number | 0 |\n| security_group_id | d6a668b2-7395-4930-9267-e74f87fe100e |\n| tags | [] |\n| updated_at | 2026-02-27T17:45:05Z |\n+-------------------------+--------------------------------------+\n+-------------------------+--------------------------------------+\n| Field | Value |\n+-------------------------+--------------------------------------+\n| created_at | 2026-02-27T17:45:07Z |\n| description | |\n| direction | ingress |\n| ether_type | IPv4 |\n| id | 442bed43-8977-4634-ab32-43f2785fe0fc |\n| name | None |\n| port_range_max | 22 |\n| port_range_min | 22 |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| protocol | tcp |\n| remote_address_group_id | None |\n| remote_group_id | None |\n| remote_ip_prefix | 0.0.0.0/0 |\n| revision_number | 0 |\n| security_group_id | d6a668b2-7395-4930-9267-e74f87fe100e |\n| tags | [] |\n| updated_at | 2026-02-27T17:45:07Z |\n+-------------------------+--------------------------------------+\nPING 192.168.122.20 (192.168.122.20) 56(84) bytes of data.\n[1772214311.851396] 64 bytes from 192.168.122.20: icmp_seq=1 ttl=63 time=13.7 ms\n\n--- 192.168.122.20 ping statistics ---\n1 packets transmitted, 1 received, 0% packet loss, time 0ms\nrtt min/avg/max/mdev = 13.650/13.650/13.650/0.000 ms\n+---------------------+--------------------------------------+\n| Field | Value |\n+---------------------+--------------------------------------+\n| attachments | [] |\n| availability_zone | nova |\n| bootable | false |\n| consistencygroup_id | None |\n| created_at | 2026-02-27T17:45:17.529226 |\n| description | None |\n| encrypted | False |\n| id | cd62b452-066a-4dfa-b3a8-2c4de48ebbe4 |\n| migration_status | None |\n| multiattach | False |\n| name | disk |\n| properties | |\n| replication_status | None |\n| size | 1 |\n| snapshot_id | None |\n| source_volid | None |\n| status | creating |\n| type | tripleo |\n| updated_at | None |\n| user_id | 93d9b9d6929c41ee8d62569442846100 
|\n+---------------------+--------------------------------------+\nresult={\n \"attachments\": [],\n \"availability_zone\": \"nova\",\n \"bootable\": \"false\",\n \"consistencygroup_id\": null,\n \"created_at\": \"2026-02-27T17:45:17.000000\",\n \"description\": null,\n \"encrypted\": false,\n \"id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",\n \"migration_status\": null,\n \"multiattach\": false,\n \"name\": \"disk\",\n \"os-vol-host-attr:host\": \"hostgroup@tripleo_ceph#tripleo_ceph\",\n \"os-vol-mig-status-attr:migstat\": null,\n \"os-vol-mig-status-attr:name_id\": null,\n \"os-vol-tenant-attr:tenant_id\": \"8c8307df41034feea2d7e66d9c38ae14\",\n \"properties\": {},\n \"replication_status\": null,\n \"size\": 1,\n \"snapshot_id\": null,\n \"source_volid\": null,\n \"status\": \"creating\",\n \"type\": \"tripleo\",\n \"updated_at\": \"2026-02-27T17:45:17.000000\",\n \"user_id\": \"93d9b9d6929c41ee8d62569442846100\",\n \"volume_image_metadata\": {\n \"signature_verified\": \"False\"\n }\n}\nWaiting for test volume 'disk' creation\n+-------------+--------------------------------------+\n| Field | Value |\n+-------------+--------------------------------------+\n| created_at | 2026-02-27T17:45:34.245606 |\n| description | None |\n| id | 0bd1f9bc-dcf0-44f0-9849-8beaf13e0ed3 |\n| name | snapshot |\n| properties | |\n| size | 1 |\n| status | creating |\n| updated_at | None |\n| volume_id | cd62b452-066a-4dfa-b3a8-2c4de48ebbe4 |\n+-------------+--------------------------------------+\n+---------------------+--------------------------------------+\n| Field | Value |\n+---------------------+--------------------------------------+\n| attachments | [] |\n| availability_zone | nova |\n| bootable | false |\n| consistencygroup_id | None |\n| created_at | 2026-02-27T17:45:48.392275 |\n| description | None |\n| encrypted | False |\n| id | 3fedaf47-cb2b-4b6e-afc6-3380205fe9b4 |\n| migration_status | None |\n| multiattach | False |\n| name | boot-volume |\n| properties | |\n| replication_status | None |\n| size | 1 |\n| snapshot_id | None |\n| source_volid | None |\n| status | creating |\n| type | tripleo |\n| updated_at | None |\n| user_id | 93d9b9d6929c41ee8d62569442846100 |\n+---------------------+--------------------------------------+\n\n+-------------------------------------+----------------------------------------------------------+\n| Field | Value |\n+-------------------------------------+----------------------------------------------------------+\n| OS-DCF:diskConfig | MANUAL |\n| OS-EXT-AZ:availability_zone | nova |\n| OS-EXT-SRV-ATTR:host | standalone.localdomain |\n| OS-EXT-SRV-ATTR:hypervisor_hostname | standalone.localdomain |\n| OS-EXT-SRV-ATTR:instance_name | instance-00000002 |\n| OS-EXT-STS:power_state | Running |\n| OS-EXT-STS:task_state | None |\n| OS-EXT-STS:vm_state | active |\n| OS-SRV-USG:launched_at | 2026-02-27T17:46:02.000000 |\n| OS-SRV-USG:terminated_at | None |\n| accessIPv4 | |\n| accessIPv6 | |\n| addresses | private=192.168.0.24 |\n| adminPass | qrGTyPGdMks3 |\n| config_drive | |\n| created | 2026-02-27T17:45:57Z |\n| flavor | m1.small (0410c4f3-80fc-472f-99b2-45042a836694) |\n| hostId | 03cdec8b180f092f35ff754dec8182fa1408246759e54ffc36742686 |\n| id | c996f66b-6a3f-4780-9a59-f5906e19830d |\n| image | N/A (booted from volume) |\n| key_name | None |\n| name | bfv-server |\n| progress | 0 |\n| project_id | 8c8307df41034feea2d7e66d9c38ae14 |\n| properties | |\n| security_groups | name='default' |\n| status | ACTIVE |\n| updated | 2026-02-27T17:46:02Z |\n| user_id | 
93d9b9d6929c41ee8d62569442846100 |\n| volumes_attached | id='3fedaf47-cb2b-4b6e-afc6-3380205fe9b4' |\n+-------------------------------------+----------------------------------------------------------+\n+-------+--------------------------------------+\n| Field | Value |\n+-------+--------------------------------------+\n| id | ac0dd04b-35ed-4195-b8b4-74ede0735dce |\n| name | backup |\n+-------+--------------------------------------+\nresult={\n \"availability_zone\": null,\n \"container\": \"backups\",\n \"created_at\": \"2026-02-27T17:46:13.000000\",\n \"data_timestamp\": \"2026-02-27T17:46:13.000000\",\n \"description\": null,\n \"fail_reason\": null,\n \"has_dependent_backups\": false,\n \"id\": \"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",\n \"is_incremental\": false,\n \"name\": \"backup\",\n \"object_count\": 0,\n \"size\": 1,\n \"snapshot_id\": null,\n \"status\": \"creating\",\n \"updated_at\": \"2026-02-27T17:46:15.000000\",\n \"volume_id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"\n}\nWaiting for test volume 'disk' backup completion", "stdout_lines": ["+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+", "| Field | Value |", "+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+", "| container_format | bare |", "| created_at | 2026-02-27T17:43:36Z |", "| disk_format | qcow2 |", "| file | /v2/images/515db511-7de7-465a-bc9d-1db3f2238101/file |", "| id | 515db511-7de7-465a-bc9d-1db3f2238101 |", "| min_disk | 0 |", "| min_ram | 0 |", "| name | cirros |", "| owner | 8c8307df41034feea2d7e66d9c38ae14 |", "| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' |", "| protected | False |", "| schema | /v2/schemas/image |", "| status | queued |", "| tags | |", "| updated_at | 2026-02-27T17:43:36Z |", "| visibility | shared |", "+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+", "+----------------------------+--------------------------------------+", "| Field | Value |", "+----------------------------+--------------------------------------+", "| OS-FLV-DISABLED:disabled | False |", "| OS-FLV-EXT-DATA:ephemeral | 1 |", "| description | None |", "| disk | 1 |", "| id | 0410c4f3-80fc-472f-99b2-45042a836694 |", "| name | m1.small |", "| os-flavor-access:is_public | True |", "| properties | |", "| ram | 512 |", "| rxtx_factor | 1.0 |", "| swap | |", "| vcpus | 1 |", "+----------------------------+--------------------------------------+", "+---------------------------+--------------------------------------+", "| Field | Value |", "+---------------------------+--------------------------------------+", "| admin_state_up | UP |", "| availability_zone_hints | |", "| availability_zones | |", "| created_at | 2026-02-27T17:43:49Z |", "| description | |", "| dns_domain | |", "| id | 8884680d-17a4-4358-8041-a381c5bfe181 |", "| ipv4_address_scope | None |", "| ipv6_address_scope | None |", "| is_default | False |", "| is_vlan_transparent | None |", "| mtu | 1442 |", "| name | private |", "| port_security_enabled | True |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| provider:network_type | geneve |", "| provider:physical_network | None |", "| provider:segmentation_id | 55261 
|", "| qos_policy_id | None |", "| revision_number | 1 |", "| router:external | Internal |", "| segments | None |", "| shared | True |", "| status | ACTIVE |", "| subnets | |", "| tags | |", "| updated_at | 2026-02-27T17:43:49Z |", "+---------------------------+--------------------------------------+", "+----------------------+--------------------------------------+", "| Field | Value |", "+----------------------+--------------------------------------+", "| allocation_pools | 192.168.0.2-192.168.0.254 |", "| cidr | 192.168.0.0/24 |", "| created_at | 2026-02-27T17:43:55Z |", "| description | |", "| dns_nameservers | |", "| dns_publish_fixed_ip | None |", "| enable_dhcp | True |", "| gateway_ip | 192.168.0.1 |", "| host_routes | |", "| id | 7943a7a8-9a44-4360-8699-65460a3838c9 |", "| ip_version | 4 |", "| ipv6_address_mode | None |", "| ipv6_ra_mode | None |", "| name | priv_sub |", "| network_id | 8884680d-17a4-4358-8041-a381c5bfe181 |", "| prefix_length | None |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| revision_number | 0 |", "| segment_id | None |", "| service_types | |", "| subnetpool_id | None |", "| tags | |", "| updated_at | 2026-02-27T17:43:55Z |", "+----------------------+--------------------------------------+", "+---------------------------+--------------------------------------+", "| Field | Value |", "+---------------------------+--------------------------------------+", "| admin_state_up | UP |", "| availability_zone_hints | |", "| availability_zones | |", "| created_at | 2026-02-27T17:44:01Z |", "| description | |", "| dns_domain | |", "| id | e728ec87-c8bf-41f8-993a-6e48da3a50b5 |", "| ipv4_address_scope | None |", "| ipv6_address_scope | None |", "| is_default | False |", "| is_vlan_transparent | None |", "| mtu | 1500 |", "| name | public |", "| port_security_enabled | True |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| provider:network_type | flat |", "| provider:physical_network | datacentre |", "| provider:segmentation_id | None |", "| qos_policy_id | None |", "| revision_number | 1 |", "| router:external | External |", "| segments | None |", "| shared | False |", "| status | ACTIVE |", "| subnets | |", "| tags | |", "| updated_at | 2026-02-27T17:44:01Z |", "+---------------------------+--------------------------------------+", "+----------------------+--------------------------------------+", "| Field | Value |", "+----------------------+--------------------------------------+", "| allocation_pools | 192.168.122.171-192.168.122.250 |", "| cidr | 192.168.122.0/24 |", "| created_at | 2026-02-27T17:44:07Z |", "| description | |", "| dns_nameservers | |", "| dns_publish_fixed_ip | None |", "| enable_dhcp | True |", "| gateway_ip | 192.168.122.1 |", "| host_routes | |", "| id | 22719ca2-db19-434d-9506-7e1ff13977a5 |", "| ip_version | 4 |", "| ipv6_address_mode | None |", "| ipv6_ra_mode | None |", "| name | public_subnet |", "| network_id | e728ec87-c8bf-41f8-993a-6e48da3a50b5 |", "| prefix_length | None |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| revision_number | 0 |", "| segment_id | None |", "| service_types | |", "| subnetpool_id | None |", "| tags | |", "| updated_at | 2026-02-27T17:44:07Z |", "+----------------------+--------------------------------------+", "+-------------------------+--------------------------------------+", "| Field | Value |", "+-------------------------+--------------------------------------+", "| admin_state_up | UP |", "| availability_zone_hints | |", "| availability_zones | |", "| created_at | 
2026-02-27T17:44:13Z |", "| description | |", "| external_gateway_info | null |", "| flavor_id | None |", "| id | 7fb94fe0-0361-4353-b4f1-0fc0f5d3e6af |", "| name | priv_router |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| revision_number | 1 |", "| routes | |", "| status | ACTIVE |", "| tags | |", "| updated_at | 2026-02-27T17:44:13Z |", "+-------------------------+--------------------------------------+", "+---------------------+--------------------------------------+", "| Field | Value |", "+---------------------+--------------------------------------+", "| created_at | 2026-02-27T17:44:29Z |", "| description | |", "| dns_domain | |", "| dns_name | |", "| fixed_ip_address | None |", "| floating_ip_address | 192.168.122.20 |", "| floating_network_id | e728ec87-c8bf-41f8-993a-6e48da3a50b5 |", "| id | 6566a5b4-d6e3-4aec-98b2-2c7e2672833d |", "| name | 192.168.122.20 |", "| port_details | None |", "| port_id | None |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| qos_policy_id | None |", "| revision_number | 0 |", "| router_id | None |", "| status | DOWN |", "| subnet_id | None |", "| tags | [] |", "| updated_at | 2026-02-27T17:44:29Z |", "+---------------------+--------------------------------------+", "", "+-------------------------------------+----------------------------------------------------------+", "| Field | Value |", "+-------------------------------------+----------------------------------------------------------+", "| OS-DCF:diskConfig | MANUAL |", "| OS-EXT-AZ:availability_zone | nova |", "| OS-EXT-SRV-ATTR:host | standalone.localdomain |", "| OS-EXT-SRV-ATTR:hypervisor_hostname | standalone.localdomain |", "| OS-EXT-SRV-ATTR:instance_name | instance-00000001 |", "| OS-EXT-STS:power_state | Running |", "| OS-EXT-STS:task_state | None |", "| OS-EXT-STS:vm_state | active |", "| OS-SRV-USG:launched_at | 2026-02-27T17:44:47.000000 |", "| OS-SRV-USG:terminated_at | None |", "| accessIPv4 | |", "| accessIPv6 | |", "| addresses | private=192.168.0.235 |", "| adminPass | SvBFgYsVqYs5 |", "| config_drive | |", "| created | 2026-02-27T17:44:36Z |", "| flavor | m1.small (0410c4f3-80fc-472f-99b2-45042a836694) |", "| hostId | 03cdec8b180f092f35ff754dec8182fa1408246759e54ffc36742686 |", "| id | 272bf86a-2d41-484a-8168-6a8575429dbc |", "| image | cirros (515db511-7de7-465a-bc9d-1db3f2238101) |", "| key_name | None |", "| name | test |", "| progress | 0 |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| properties | |", "| security_groups | name='default' |", "| status | ACTIVE |", "| updated | 2026-02-27T17:44:47Z |", "| user_id | 93d9b9d6929c41ee8d62569442846100 |", "| volumes_attached | |", "+-------------------------------------+----------------------------------------------------------+", "+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", "| Field | Value |", "+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", "| created_at | 2026-02-27T17:45:02Z |", "| description | adoption-test |", "| id | d6a668b2-7395-4930-9267-e74f87fe100e |", "| name | adoption-test |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| revision_number | 1 |", "| rules | created_at='2026-02-27T17:45:02Z', direction='egress', ethertype='IPv6', id='64465c08-6bfc-4605-abd0-30cb8471c249', 
standard_attr_id='48', updated_at='2026-02-27T17:45:02Z' |", "| | created_at='2026-02-27T17:45:02Z', direction='egress', ethertype='IPv4', id='74c7e5d8-458f-4d3c-9e06-64c1fa49c4d5', standard_attr_id='47', updated_at='2026-02-27T17:45:02Z' |", "| stateful | True |", "| tags | [] |", "| updated_at | 2026-02-27T17:45:02Z |", "+-----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+", "+-------------------------+--------------------------------------+", "| Field | Value |", "+-------------------------+--------------------------------------+", "| created_at | 2026-02-27T17:45:05Z |", "| description | |", "| direction | ingress |", "| ether_type | IPv4 |", "| id | 8b59b789-799b-43c3-a175-d9414b27277c |", "| name | None |", "| port_range_max | None |", "| port_range_min | None |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| protocol | icmp |", "| remote_address_group_id | None |", "| remote_group_id | None |", "| remote_ip_prefix | 0.0.0.0/0 |", "| revision_number | 0 |", "| security_group_id | d6a668b2-7395-4930-9267-e74f87fe100e |", "| tags | [] |", "| updated_at | 2026-02-27T17:45:05Z |", "+-------------------------+--------------------------------------+", "+-------------------------+--------------------------------------+", "| Field | Value |", "+-------------------------+--------------------------------------+", "| created_at | 2026-02-27T17:45:07Z |", "| description | |", "| direction | ingress |", "| ether_type | IPv4 |", "| id | 442bed43-8977-4634-ab32-43f2785fe0fc |", "| name | None |", "| port_range_max | 22 |", "| port_range_min | 22 |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| protocol | tcp |", "| remote_address_group_id | None |", "| remote_group_id | None |", "| remote_ip_prefix | 0.0.0.0/0 |", "| revision_number | 0 |", "| security_group_id | d6a668b2-7395-4930-9267-e74f87fe100e |", "| tags | [] |", "| updated_at | 2026-02-27T17:45:07Z |", "+-------------------------+--------------------------------------+", "PING 192.168.122.20 (192.168.122.20) 56(84) bytes of data.", "[1772214311.851396] 64 bytes from 192.168.122.20: icmp_seq=1 ttl=63 time=13.7 ms", "", "--- 192.168.122.20 ping statistics ---", "1 packets transmitted, 1 received, 0% packet loss, time 0ms", "rtt min/avg/max/mdev = 13.650/13.650/13.650/0.000 ms", "+---------------------+--------------------------------------+", "| Field | Value |", "+---------------------+--------------------------------------+", "| attachments | [] |", "| availability_zone | nova |", "| bootable | false |", "| consistencygroup_id | None |", "| created_at | 2026-02-27T17:45:17.529226 |", "| description | None |", "| encrypted | False |", "| id | cd62b452-066a-4dfa-b3a8-2c4de48ebbe4 |", "| migration_status | None |", "| multiattach | False |", "| name | disk |", "| properties | |", "| replication_status | None |", "| size | 1 |", "| snapshot_id | None |", "| source_volid | None |", "| status | creating |", "| type | tripleo |", "| updated_at | None |", "| user_id | 93d9b9d6929c41ee8d62569442846100 |", "+---------------------+--------------------------------------+", "result={", " \"attachments\": [],", " \"availability_zone\": \"nova\",", " \"bootable\": \"false\",", " \"consistencygroup_id\": null,", " \"created_at\": \"2026-02-27T17:45:17.000000\",", " \"description\": null,", " \"encrypted\": false,", " \"id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\",", " \"migration_status\": 
null,", " \"multiattach\": false,", " \"name\": \"disk\",", " \"os-vol-host-attr:host\": \"hostgroup@tripleo_ceph#tripleo_ceph\",", " \"os-vol-mig-status-attr:migstat\": null,", " \"os-vol-mig-status-attr:name_id\": null,", " \"os-vol-tenant-attr:tenant_id\": \"8c8307df41034feea2d7e66d9c38ae14\",", " \"properties\": {},", " \"replication_status\": null,", " \"size\": 1,", " \"snapshot_id\": null,", " \"source_volid\": null,", " \"status\": \"creating\",", " \"type\": \"tripleo\",", " \"updated_at\": \"2026-02-27T17:45:17.000000\",", " \"user_id\": \"93d9b9d6929c41ee8d62569442846100\",", " \"volume_image_metadata\": {", " \"signature_verified\": \"False\"", " }", "}", "Waiting for test volume 'disk' creation", "+-------------+--------------------------------------+", "| Field | Value |", "+-------------+--------------------------------------+", "| created_at | 2026-02-27T17:45:34.245606 |", "| description | None |", "| id | 0bd1f9bc-dcf0-44f0-9849-8beaf13e0ed3 |", "| name | snapshot |", "| properties | |", "| size | 1 |", "| status | creating |", "| updated_at | None |", "| volume_id | cd62b452-066a-4dfa-b3a8-2c4de48ebbe4 |", "+-------------+--------------------------------------+", "+---------------------+--------------------------------------+", "| Field | Value |", "+---------------------+--------------------------------------+", "| attachments | [] |", "| availability_zone | nova |", "| bootable | false |", "| consistencygroup_id | None |", "| created_at | 2026-02-27T17:45:48.392275 |", "| description | None |", "| encrypted | False |", "| id | 3fedaf47-cb2b-4b6e-afc6-3380205fe9b4 |", "| migration_status | None |", "| multiattach | False |", "| name | boot-volume |", "| properties | |", "| replication_status | None |", "| size | 1 |", "| snapshot_id | None |", "| source_volid | None |", "| status | creating |", "| type | tripleo |", "| updated_at | None |", "| user_id | 93d9b9d6929c41ee8d62569442846100 |", "+---------------------+--------------------------------------+", "", "+-------------------------------------+----------------------------------------------------------+", "| Field | Value |", "+-------------------------------------+----------------------------------------------------------+", "| OS-DCF:diskConfig | MANUAL |", "| OS-EXT-AZ:availability_zone | nova |", "| OS-EXT-SRV-ATTR:host | standalone.localdomain |", "| OS-EXT-SRV-ATTR:hypervisor_hostname | standalone.localdomain |", "| OS-EXT-SRV-ATTR:instance_name | instance-00000002 |", "| OS-EXT-STS:power_state | Running |", "| OS-EXT-STS:task_state | None |", "| OS-EXT-STS:vm_state | active |", "| OS-SRV-USG:launched_at | 2026-02-27T17:46:02.000000 |", "| OS-SRV-USG:terminated_at | None |", "| accessIPv4 | |", "| accessIPv6 | |", "| addresses | private=192.168.0.24 |", "| adminPass | qrGTyPGdMks3 |", "| config_drive | |", "| created | 2026-02-27T17:45:57Z |", "| flavor | m1.small (0410c4f3-80fc-472f-99b2-45042a836694) |", "| hostId | 03cdec8b180f092f35ff754dec8182fa1408246759e54ffc36742686 |", "| id | c996f66b-6a3f-4780-9a59-f5906e19830d |", "| image | N/A (booted from volume) |", "| key_name | None |", "| name | bfv-server |", "| progress | 0 |", "| project_id | 8c8307df41034feea2d7e66d9c38ae14 |", "| properties | |", "| security_groups | name='default' |", "| status | ACTIVE |", "| updated | 2026-02-27T17:46:02Z |", "| user_id | 93d9b9d6929c41ee8d62569442846100 |", "| volumes_attached | id='3fedaf47-cb2b-4b6e-afc6-3380205fe9b4' |", 
"+-------------------------------------+----------------------------------------------------------+", "+-------+--------------------------------------+", "| Field | Value |", "+-------+--------------------------------------+", "| id | ac0dd04b-35ed-4195-b8b4-74ede0735dce |", "| name | backup |", "+-------+--------------------------------------+", "result={", " \"availability_zone\": null,", " \"container\": \"backups\",", " \"created_at\": \"2026-02-27T17:46:13.000000\",", " \"data_timestamp\": \"2026-02-27T17:46:13.000000\",", " \"description\": null,", " \"fail_reason\": null,", " \"has_dependent_backups\": false,", " \"id\": \"ac0dd04b-35ed-4195-b8b4-74ede0735dce\",", " \"is_incremental\": false,", " \"name\": \"backup\",", " \"object_count\": 0,", " \"size\": 1,", " \"snapshot_id\": null,", " \"status\": \"creating\",", " \"updated_at\": \"2026-02-27T17:46:15.000000\",", " \"volume_id\": \"cd62b452-066a-4dfa-b3a8-2c4de48ebbe4\"", "}", "Waiting for test volume 'disk' backup completion"]} TASK [development_environment : pre-launch test Ironic instance] *************** skipping: [localhost] => {"changed": false, "false_condition": "'pre_launch_ironic.bash' in prelaunch_test_instance_scripts", "skip_reason": "Conditional result was False"} TASK [development_environment : Start the ping test to the VM instance.] ******* skipping: [localhost] => {"changed": false, "false_condition": "ping_test|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Create stop l3 agent connectivity check scripts.] *** skipping: [localhost] => {"changed": false, "false_condition": "ping_test|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Remember that the ping test is running.] ******* skipping: [localhost] => {"changed": false, "false_condition": "ping_test|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : creates Barbican secret] *********************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack secret store --name testSecret --payload 'TestPayload'\n", "delta": "0:00:03.143315", "end": "2026-02-27 17:46:27.924979", "msg": "", "rc": 0, "start": "2026-02-27 17:46:24.781664", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack secret store --name testSecret --payload TestPayload\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.", "stderr_lines": ["+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack secret store --name testSecret --payload TestPayload", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "Warning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts."], "stdout": "+---------------+------------------------------------------------------------------------+\n| Field | Value |\n+---------------+------------------------------------------------------------------------+\n| Secret href | 
http://172.21.0.2:9311/v1/secrets/1d0f4712-eebd-48ea-8063-8d50df59c7d9 |\n| Name | testSecret |\n| Created | None |\n| Status | None |\n| Content types | None |\n| Algorithm | aes |\n| Bit length | 256 |\n| Secret type | opaque |\n| Mode | cbc |\n| Expiration | None |\n+---------------+------------------------------------------------------------------------+", "stdout_lines": ["+---------------+------------------------------------------------------------------------+", "| Field | Value |", "+---------------+------------------------------------------------------------------------+", "| Secret href | http://172.21.0.2:9311/v1/secrets/1d0f4712-eebd-48ea-8063-8d50df59c7d9 |", "| Name | testSecret |", "| Created | None |", "| Status | None |", "| Content types | None |", "| Algorithm | aes |", "| Bit length | 256 |", "| Secret type | opaque |", "| Mode | cbc |", "| Expiration | None |", "+---------------+------------------------------------------------------------------------+"]} TASK [development_environment : Issue session fernet token] ******************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\n", "delta": "0:00:02.475860", "end": "2026-02-27 17:46:30.703487", "msg": "", "rc": 0, "start": "2026-02-27 17:46:28.227627", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.", "stderr_lines": ["+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "Warning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts."], "stdout": "gAAAAABpodh2ebpAYftt0GAdBr7GxcEDwy-xel4aRqfS0lnf8RiCZU3iqgbdWVMGXmyfBgzhSIEZUeYMazRU2yzyLN0QI6X7fz2KnTsXZbUB700fOq9gYqE-dJvGgOU51bqJEPKUvy5TlZ85IsllR1GM_LblMbE03jq8u_HSDWF5Sl-hyMeYq6w", "stdout_lines": ["gAAAAABpodh2ebpAYftt0GAdBr7GxcEDwy-xel4aRqfS0lnf8RiCZU3iqgbdWVMGXmyfBgzhSIEZUeYMazRU2yzyLN0QI6X7fz2KnTsXZbUB700fOq9gYqE-dJvGgOU51bqJEPKUvy5TlZ85IsllR1GM_LblMbE03jq8u_HSDWF5Sl-hyMeYq6w"]} TASK [development_environment : Render OIDC cloudrc] *************************** skipping: [localhost] => {"changed": false, "false_condition": "enable_federation | default(false) | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Upload OIDC cloudrc to source cloud] *********** skipping: [localhost] => {"changed": false, "false_condition": "enable_federation | default(false) | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Issue OIDC token] ****************************** skipping: [localhost] => {"changed": false, "false_condition": "enable_federation | default(false) | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Create credential for sanity checking its value after adoption] *** changed: 
[localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack credential create admin test -f value -c id\n", "delta": "0:00:03.395119", "end": "2026-02-27 17:46:34.513737", "msg": "", "rc": 0, "start": "2026-02-27 17:46:31.118618", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack credential create admin test -f value -c id\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nWarning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts.", "stderr_lines": ["+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack credential create admin test -f value -c id", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "Warning: Permanently added '192.168.122.100' (ED25519) to the list of known hosts."], "stdout": "9a2870bb05d74e6d820d701a945a5bc2", "stdout_lines": ["9a2870bb05d74e6d820d701a945a5bc2"]} TASK [development_environment : execute create resources script] *************** skipping: [localhost] => {"changed": false, "false_condition": "neutron_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : execute prepare-pinger script] ***************** skipping: [localhost] => {"changed": false, "false_condition": "neutron_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : execute start-pinger script] ******************* skipping: [localhost] => {"changed": false, "false_condition": "neutron_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : tobiko installation] *************************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : oc undercloud installation] ******************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : copy kube conf to undercloud] ****************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : upload tobiko-playbook.yaml to the undercloud] *** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Add tobiko.conf to the undercloud] ************* skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Run Tobiko from the undercloud] **************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK 
[development_environment : copy keys from undercloud for tobiko] ********** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : create Octavia load balancer] ****************** skipping: [localhost] => {"changed": false, "false_condition": "prelaunch_octavia_workload|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : get the load balancer VIP address] ************* skipping: [localhost] => {"changed": false, "false_condition": "prelaunch_octavia_workload|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : test the load balancer VIP address] ************ skipping: [localhost] => {"changed": false, "false_condition": "prelaunch_octavia_workload|bool", "skip_reason": "Conditional result was False"} TASK [tls_adoption : Create Certificate Issuer with cert and key from IPA] ***** skipping: [localhost] => {"changed": false, "false_condition": "enable_tlse|default(false)", "skip_reason": "Conditional result was False"} TASK [backend_services : create osp-secret] ************************************ changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\nmake input\n", "delta": "0:00:01.489820", "end": "2026-02-27 17:46:36.893982", "msg": "", "rc": 0, "start": "2026-02-27 17:46:35.404162", "stderr": "+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n+ make input\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z openstack ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'\n+ cat\n+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out\n+ '[' -z openstack ']'\n+ '[' -z osp-secret ']'\n+ '[' -z 12345678 ']'\n+ '[' -z 1234567842 ']'\n+ '[' -z 767c3ed056cbaa3b9dfedb8c6f825bf0 ']'\n+ '[' -z sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= ']'\n+ '[' -z COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f ']'\n+ '[' -z openstack ']'\n+ '[' -z libvirt-secret ']'\n+ DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input\n+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input\n+ cat\nError from server (NotFound): secrets \"osp-secret\" not found", "stderr_lines": ["+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/", "+ make input", "+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'", "+ '[' -z openstack ']'", "+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ '[' '!' 
-d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'", "+ cat", "+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out", "+ '[' -z openstack ']'", "+ '[' -z osp-secret ']'", "+ '[' -z 12345678 ']'", "+ '[' -z 1234567842 ']'", "+ '[' -z 767c3ed056cbaa3b9dfedb8c6f825bf0 ']'", "+ '[' -z sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= ']'", "+ '[' -z COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f ']'", "+ '[' -z openstack ']'", "+ '[' -z libvirt-secret ']'", "+ DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ']'", "+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input", "+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input", "+ cat", "Error from server (NotFound): secrets \"osp-secret\" not found"], "stdout": "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml\nnamespace/openstack unchanged\ntimeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\nopenstack Active\noc project openstack\nAlready on project \"openstack\" on server \"https://api.crc.testing:6443\".\nbash scripts/gen-input-kustomize.sh\n~/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ~/src/github.com/openstack-k8s-operators/install_yamls\noc get secret/osp-secret || oc kustomize /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input | oc apply -f -\nsecret/libvirt-secret created\nsecret/octavia-ca-passphrase created\nsecret/osp-secret created\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "stdout_lines": ["make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "bash scripts/gen-namespace.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml", "namespace/openstack unchanged", "timeout 500s bash -c \"while ! 
(oc get project.v1.project.openshift.io openstack); do sleep 1; done\"", "NAME DISPLAY NAME STATUS", "openstack Active", "oc project openstack", "Already on project \"openstack\" on server \"https://api.crc.testing:6443\".", "bash scripts/gen-input-kustomize.sh", "~/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ~/src/github.com/openstack-k8s-operators/install_yamls", "oc get secret/osp-secret || oc kustomize /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input | oc apply -f -", "secret/libvirt-secret created", "secret/octavia-ca-passphrase created", "secret/osp-secret created", "make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'"]} TASK [backend_services : execute alternative tasks when source env is OSPdO] *** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [backend_services : set service passwords] ******************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n oc set data secret/osp-secret \"AodhPassword=KGQpVrxiTZtwG4ROLhWl2Wxz4\"\n oc set data secret/osp-secret \"BarbicanPassword=TycBK9bcKqK7n5movge5u3gGe\"\n oc set data secret/osp-secret \"CeilometerPassword=wJy0vjZ3r5ZefnSISa1Rpf1xA\"\n oc set data secret/osp-secret \"CinderPassword=5pC39srDQSN3Z5cthDw9hfjfS\"\n oc set data secret/osp-secret \"GlancePassword=7nawnVcdqlj6BV4Q7hyDxMtKt\"\n oc set data secret/osp-secret \"IronicPassword=LyA0eRVQAffuyokTIIrqkpZvl\"\n oc set data secret/osp-secret \"IronicInspectorPassword=LyA0eRVQAffuyokTIIrqkpZvl\"\n oc set data secret/osp-secret \"NeutronPassword=7CPUE28w4Pmbs4boA63iHrGef\"\n oc set data secret/osp-secret \"NovaPassword=73Prrc6Zkxr5GmKDFouRdR7yR\"\n oc set data secret/osp-secret \"OctaviaPassword=JIzJopF59iKTZvemrQViY9wL3\"\n oc set data secret/osp-secret \"PlacementPassword=VY6v4HV0FFTdWkKg8WV0K9553\"\n oc set data secret/osp-secret \"HeatPassword=ibND2Zkvmn2A5NRKTaAhdRwKr\"\n oc set data secret/osp-secret \"HeatAuthEncryptionKey=ExgpgRE3vIrQoYF78Z8YtRnEMGcP9mvr\"\n oc set data secret/osp-secret \"HeatStackDomainAdminPassword=6ciDtlvzuOz6zu0esrqhvv7El\"\n oc set data secret/osp-secret \"ManilaPassword=5Gkb63iuWVVpaYzeTaf2JNhWO\"\n oc set data secret/osp-secret \"SwiftPassword=dpV0z4lmNQEv8FR8uZrJ1GbAV\"\n", "delta": "0:00:02.388947", "end": "2026-02-27 17:46:39.609948", "msg": "", "rc": 0, "start": "2026-02-27 17:46:37.221001", "stderr": "+ oc set data secret/osp-secret AodhPassword=KGQpVrxiTZtwG4ROLhWl2Wxz4\n+ oc set data secret/osp-secret BarbicanPassword=TycBK9bcKqK7n5movge5u3gGe\n+ oc set data secret/osp-secret CeilometerPassword=wJy0vjZ3r5ZefnSISa1Rpf1xA\n+ oc set data secret/osp-secret CinderPassword=5pC39srDQSN3Z5cthDw9hfjfS\n+ oc set data secret/osp-secret GlancePassword=7nawnVcdqlj6BV4Q7hyDxMtKt\n+ oc set data secret/osp-secret IronicPassword=LyA0eRVQAffuyokTIIrqkpZvl\n+ oc set data secret/osp-secret IronicInspectorPassword=LyA0eRVQAffuyokTIIrqkpZvl\n+ oc set data secret/osp-secret NeutronPassword=7CPUE28w4Pmbs4boA63iHrGef\n+ oc set data secret/osp-secret NovaPassword=73Prrc6Zkxr5GmKDFouRdR7yR\n+ oc set data secret/osp-secret OctaviaPassword=JIzJopF59iKTZvemrQViY9wL3\n+ oc set data secret/osp-secret PlacementPassword=VY6v4HV0FFTdWkKg8WV0K9553\n+ oc set data secret/osp-secret HeatPassword=ibND2Zkvmn2A5NRKTaAhdRwKr\n+ oc set data secret/osp-secret HeatAuthEncryptionKey=ExgpgRE3vIrQoYF78Z8YtRnEMGcP9mvr\n+ oc set data secret/osp-secret 
HeatStackDomainAdminPassword=6ciDtlvzuOz6zu0esrqhvv7El\n+ oc set data secret/osp-secret ManilaPassword=5Gkb63iuWVVpaYzeTaf2JNhWO\n+ oc set data secret/osp-secret SwiftPassword=dpV0z4lmNQEv8FR8uZrJ1GbAV", "stderr_lines": ["+ oc set data secret/osp-secret AodhPassword=KGQpVrxiTZtwG4ROLhWl2Wxz4", "+ oc set data secret/osp-secret BarbicanPassword=TycBK9bcKqK7n5movge5u3gGe", "+ oc set data secret/osp-secret CeilometerPassword=wJy0vjZ3r5ZefnSISa1Rpf1xA", "+ oc set data secret/osp-secret CinderPassword=5pC39srDQSN3Z5cthDw9hfjfS", "+ oc set data secret/osp-secret GlancePassword=7nawnVcdqlj6BV4Q7hyDxMtKt", "+ oc set data secret/osp-secret IronicPassword=LyA0eRVQAffuyokTIIrqkpZvl", "+ oc set data secret/osp-secret IronicInspectorPassword=LyA0eRVQAffuyokTIIrqkpZvl", "+ oc set data secret/osp-secret NeutronPassword=7CPUE28w4Pmbs4boA63iHrGef", "+ oc set data secret/osp-secret NovaPassword=73Prrc6Zkxr5GmKDFouRdR7yR", "+ oc set data secret/osp-secret OctaviaPassword=JIzJopF59iKTZvemrQViY9wL3", "+ oc set data secret/osp-secret PlacementPassword=VY6v4HV0FFTdWkKg8WV0K9553", "+ oc set data secret/osp-secret HeatPassword=ibND2Zkvmn2A5NRKTaAhdRwKr", "+ oc set data secret/osp-secret HeatAuthEncryptionKey=ExgpgRE3vIrQoYF78Z8YtRnEMGcP9mvr", "+ oc set data secret/osp-secret HeatStackDomainAdminPassword=6ciDtlvzuOz6zu0esrqhvv7El", "+ oc set data secret/osp-secret ManilaPassword=5Gkb63iuWVVpaYzeTaf2JNhWO", "+ oc set data secret/osp-secret SwiftPassword=dpV0z4lmNQEv8FR8uZrJ1GbAV"], "stdout": "secret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated", "stdout_lines": ["secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated"]} TASK [backend_services : create tmp directory] ********************************* changed: [localhost] => {"changed": true, "cmd": ["mkdir", "-p", "../../tests/config/tmp"], "delta": "0:00:00.004865", "end": "2026-02-27 17:46:39.890490", "msg": "", "rc": 0, "start": "2026-02-27 17:46:39.885625", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []} TASK [backend_services : template out the controlplane deployment] ************* changed: [localhost] => {"changed": true, "checksum": "1310c5723298d30d028ef6dc6a295f37574462b2", "dest": "../config/tmp/test_deployment.yaml", "gid": 1000, "group": "zuul", "md5sum": "8ae2bd55092c1b24f6d45979dabfa673", "mode": "0644", "owner": "zuul", "secontext": "unconfined_u:object_r:user_home_t:s0", "size": 3141, "src": "/home/zuul/.ansible/tmp/ansible-tmp-1772214399.963557-43932-102681783370393/source", "state": "file", "uid": 1000} TASK [backend_services : template out the OpenStackVersion deployment with container overrides] *** 
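# The control plane CR templated above (dest: ../config/tmp/test_deployment.yaml)
# can be sanity-checked before the apply that follows. A minimal sketch, assuming
# a logged-in oc session in the same working directory:
oc apply --dry-run=client -f ../config/tmp/test_deployment.yaml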
skipping: [localhost] => {"changed": false, "false_condition": "periodic|default(false)", "skip_reason": "Conditional result was False"} TASK [backend_services : Apply OpenStackVersion with container overrides to environment] *** skipping: [localhost] => {"changed": false, "false_condition": "periodic|default(false)", "skip_reason": "Conditional result was False"} TASK [backend_services : Get OpenStackVersion resource name for custom Barbican images] *** skipping: [localhost] => {"changed": false, "false_condition": "(barbican_custom_api_image is defined and barbican_custom_api_image) or (barbican_custom_worker_image is defined and barbican_custom_worker_image)", "skip_reason": "Conditional result was False"} TASK [backend_services : Patch OpenStackVersion with custom Barbican images] *** skipping: [localhost] => {"changed": false, "false_condition": "(barbican_custom_api_image is defined and barbican_custom_api_image) or (barbican_custom_worker_image is defined and barbican_custom_worker_image)", "skip_reason": "Conditional result was False"} TASK [backend_services : execute alternative tasks when source env is OSPdO] *** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [backend_services : deploy the OpenStackControlPlane CR] ****************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc apply -f ../config/tmp/test_deployment.yaml\n", "delta": "0:00:00.238449", "end": "2026-02-27 17:46:41.227132", "msg": "", "rc": 0, "start": "2026-02-27 17:46:40.988683", "stderr": "+ oc apply -f ../config/tmp/test_deployment.yaml\nWarning: spec.galera.template[openstack].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!\nWarning: spec.galera.template[openstack-cell1].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!", "stderr_lines": ["+ oc apply -f ../config/tmp/test_deployment.yaml", "Warning: spec.galera.template[openstack].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!", "Warning: spec.galera.template[openstack-cell1].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!"], "stdout": "openstackcontrolplane.core.openstack.org/openstack created", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack created"]} TASK [backend_services : verify that MariaDB and RabbitMQ are running, for all defined cells] *** FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (60 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (59 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (58 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (57 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (56 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (55 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (54 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (53 retries left). 
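# The apply above warns that a 1Gi galera storageRequest is not production-sized.
# A sketch of raising it to the suggested 5G; the JSON-patch paths are inferred
# from the warning text (spec.galera.template[...]), so verify them against the
# installed CRD before use:
oc patch openstackcontrolplane openstack --type json \
  -p='[{"op": "replace", "path": "/spec/galera/template/openstack/storageRequest", "value": "5G"},
       {"op": "replace", "path": "/spec/galera/template/openstack-cell1/storageRequest", "value": "5G"}]'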
changed: [localhost] => {"attempts": 9, "changed": true, "cmd": "set -euxo pipefail\n\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\noc get pod openstack-galera-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\noc get pod rabbitmq-server-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\nfor CELL in $(echo $RENAMED_CELLS); do\n oc get pod openstack-$CELL-galera-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\n oc get pod rabbitmq-$CELL-server-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\ndone\n", "delta": "0:00:00.644612", "end": "2026-02-27 17:47:26.189150", "msg": "", "rc": 0, "start": "2026-02-27 17:47:25.544538", "stderr": "+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ oc get pod openstack-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running\n+ oc get pod rabbitmq-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running\n++ echo cell1\n+ for CELL in $(echo $RENAMED_CELLS)\n+ oc get pod openstack-cell1-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running\n+ grep Running\n+ oc get pod rabbitmq-cell1-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "stderr_lines": ["+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ oc get pod openstack-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running", "+ oc get pod rabbitmq-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running", "++ echo cell1", "+ for CELL in $(echo $RENAMED_CELLS)", "+ oc get pod openstack-cell1-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running", "+ grep Running", "+ oc get pod rabbitmq-cell1-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'"], "stdout": "Running\nRunning\nRunning\nRunning", "stdout_lines": ["Running", "Running", "Running", "Running"]} TASK [backend_services : verify that MariaDB and RabbitMQ CR's deployed, for all defined cells] *** changed: [localhost] => (item=Galera) => {"ansible_loop_var": "item", "attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\noc get Galera -o json | jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'\n", "delta": "0:00:00.169786", "end": "2026-02-27 17:47:26.625837", "failed_when_result": false, "item": "Galera", "msg": "", "rc": 0, "start": "2026-02-27 17:47:26.456051", "stderr": "+ oc get Galera -o json\n+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'", "stderr_lines": ["+ oc get Galera -o json", "+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'"], "stdout": "true", "stdout_lines": ["true"]} FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ CR's deployed, for all defined cells (60 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ CR's deployed, for all defined cells (59 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ CR's deployed, for all defined cells (58 retries left). changed: [localhost] => (item=Rabbitmqs) => {"ansible_loop_var": "item", "attempts": 4, "changed": true, "cmd": "set -euxo pipefail\n\n\noc get Rabbitmqs -o json | jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. 
== \"Setup complete\")'\n", "delta": "0:00:00.153036", "end": "2026-02-27 17:47:43.278967", "failed_when_result": false, "item": "Rabbitmqs", "msg": "", "rc": 0, "start": "2026-02-27 17:47:43.125931", "stderr": "+ oc get Rabbitmqs -o json\n+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'", "stderr_lines": ["+ oc get Rabbitmqs -o json", "+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'"], "stdout": "true", "stdout_lines": ["true"]} TASK [backend_services : Patch openstack upstream dns server to set the correct value for the environment] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncrname=$(oc get openstackcontrolplane -o name)\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/dns/template/options\", \"value\": [{\"key\": \"server\", \"values\": [\"192.168.122.10\"]}]}]'\n", "delta": "0:00:00.870206", "end": "2026-02-27 17:47:44.405643", "msg": "", "rc": 0, "start": "2026-02-27 17:47:43.535437", "stderr": "++ oc get openstackcontrolplane -o name\n+ crname=openstackcontrolplane.core.openstack.org/openstack\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/dns/template/options\", \"value\": [{\"key\": \"server\", \"values\": [\"192.168.122.10\"]}]}]'", "stderr_lines": ["++ oc get openstackcontrolplane -o name", "+ crname=openstackcontrolplane.core.openstack.org/openstack", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/dns/template/options\", \"value\": [{\"key\": \"server\", \"values\": [\"192.168.122.10\"]}]}]'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [backend_services : Patch rabbitmq resources for lower resource consumption] *** ok: [localhost] => {"changed": false, "cmd": "set -euxo pipefail\n\n\ncrname=$(oc get openstackcontrolplane -o name)\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/cpu\", \"value\": 500m}]'\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/memory\", \"value\": 500Mi}]'\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/cpu\", \"value\": 500m}]'\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/memory\", \"value\": 500Mi}]'\n", "delta": "0:00:02.697469", "end": "2026-02-27 17:47:47.376177", "msg": "", "rc": 0, "start": "2026-02-27 17:47:44.678708", "stderr": "++ oc get openstackcontrolplane -o name\n+ crname=openstackcontrolplane.core.openstack.org/openstack\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/cpu\", \"value\": 500m}]'\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/memory\", \"value\": 500Mi}]'\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": 
\"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/cpu\", \"value\": 500m}]'\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/memory\", \"value\": 500Mi}]'", "stderr_lines": ["++ oc get openstackcontrolplane -o name", "+ crname=openstackcontrolplane.core.openstack.org/openstack", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/cpu\", \"value\": 500m}]'", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/memory\", \"value\": 500Mi}]'", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/cpu\", \"value\": 500m}]'", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/memory\", \"value\": 500Mi}]'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched\nopenstackcontrolplane.core.openstack.org/openstack patched\nopenstackcontrolplane.core.openstack.org/openstack patched\nopenstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched", "openstackcontrolplane.core.openstack.org/openstack patched", "openstackcontrolplane.core.openstack.org/openstack patched", "openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [backend_services : Verify that OpenStackControlPlane is waiting for openstackclient] *** FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (60 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (59 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (58 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (57 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (56 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (55 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (54 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (53 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (52 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (51 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (50 retries left). 
changed: [localhost] => {"attempts": 12, "changed": true, "cmd": "set -euxo pipefail\n\n\n\noc get openstackcontrolplane openstack -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}' | grep 'OpenStackControlPlane Client not started'\n", "delta": "0:00:00.143596", "end": "2026-02-27 17:48:47.373230", "msg": "", "rc": 0, "start": "2026-02-27 17:48:47.229634", "stderr": "+ oc get openstackcontrolplane openstack -o 'jsonpath={.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'\n+ grep 'OpenStackControlPlane Client not started'", "stderr_lines": ["+ oc get openstackcontrolplane openstack -o 'jsonpath={.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'", "+ grep 'OpenStackControlPlane Client not started'"], "stdout": "OpenStackControlPlane Client not started", "stdout_lines": ["OpenStackControlPlane Client not started"]} TASK [ceph_backend_configuration : set shell vars for stopping openstack services] *** ok: [localhost] => {"ansible_facts": {"ceph_backend_configuration_shell_vars": "# if tripleo uses external ceph, ssh to ceph nodes otherwise controller nodes\n# for external ceph, external_ceph and ceph1_ssh vars should be defined\nCEPH_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCEPH_KEY=$($CEPH_SSH \"cat /etc/ceph/ceph.client.openstack.keyring | base64 -w 0\")\nCEPH_CONF=$($CEPH_SSH \"cat /etc/ceph/ceph.conf | base64 -w 0\")\n"}, "changed": false} TASK [ceph_backend_configuration : update the openstack keyring caps for Manila] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nCEPH_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCEPH_CAPS=\"mgr 'allow *' mon 'allow r, profile rbd' osd 'profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=images, profile rbd pool=backups, allow rw pool manila_data'\"\nOSP_KEYRING=\"client.openstack\"\nCEPH_ADM=$($CEPH_SSH \"cephadm shell -- ceph auth caps $OSP_KEYRING $CEPH_CAPS\")\n", "delta": "0:00:03.784814", "end": "2026-02-27 17:48:51.522687", "msg": "", "rc": 0, "start": "2026-02-27 17:48:47.737873", "stderr": "+ CEPH_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CEPH_CAPS='mgr '\\''allow *'\\'' mon '\\''allow r, profile rbd'\\'' osd '\\''profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=images, profile rbd pool=backups, allow rw pool manila_data'\\'''\n+ OSP_KEYRING=client.openstack\n++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 'cephadm shell -- ceph auth caps client.openstack mgr '\\''allow *'\\'' mon '\\''allow r, profile rbd'\\'' osd '\\''profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=images, profile rbd pool=backups, allow rw pool manila_data'\\'''\nInferring fsid bb8a3706-a2c2-5bfc-8b66-bafd24f637e3\nInferring config /var/lib/ceph/bb8a3706-a2c2-5bfc-8b66-bafd24f637e3/mon.standalone/config\nUsing ceph image with id '957884a57883' and tag 'latest' created on 2026-02-09 10:26:08 +0000 UTC\nregistry.redhat.io/rhceph/rhceph-7-rhel9@sha256:50945286dae5941044aa91c7700ff058ab2cb308d5d1d6d6bb2daf28aa7a0ca3\nupdated caps for client.openstack\n+ CEPH_ADM=", "stderr_lines": ["+ CEPH_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CEPH_CAPS='mgr '\\''allow *'\\'' mon '\\''allow r, profile rbd'\\'' osd '\\''profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=images, profile rbd pool=backups, allow rw pool manila_data'\\'''", "+ OSP_KEYRING=client.openstack", "++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 'cephadm shell -- ceph auth caps client.openstack mgr '\\''allow 
*'\\'' mon '\\''allow r, profile rbd'\\'' osd '\\''profile rbd pool=vms, profile rbd pool=volumes, profile rbd pool=images, profile rbd pool=backups, allow rw pool manila_data'\\'''", "Inferring fsid bb8a3706-a2c2-5bfc-8b66-bafd24f637e3", "Inferring config /var/lib/ceph/bb8a3706-a2c2-5bfc-8b66-bafd24f637e3/mon.standalone/config", "Using ceph image with id '957884a57883' and tag 'latest' created on 2026-02-09 10:26:08 +0000 UTC", "registry.redhat.io/rhceph/rhceph-7-rhel9@sha256:50945286dae5941044aa91c7700ff058ab2cb308d5d1d6d6bb2daf28aa7a0ca3", "updated caps for client.openstack", "+ CEPH_ADM="], "stdout": "", "stdout_lines": []} TASK [ceph_backend_configuration : create ceph-conf-files secret] ************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n# if tripleo uses external ceph, ssh to ceph nodes otherwise controller nodes\n# for external ceph, external_ceph and ceph1_ssh vars should be defined\nCEPH_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCEPH_KEY=$($CEPH_SSH \"cat /etc/ceph/ceph.client.openstack.keyring | base64 -w 0\")\nCEPH_CONF=$($CEPH_SSH \"cat /etc/ceph/ceph.conf | base64 -w 0\")\n\n\noc apply -f - < {"changed": true, "cmd": "set -euxo pipefail\n\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n extraMounts:\n - name: v1\n region: r1\n extraVol:\n - propagation:\n - CinderVolume\n - CinderBackup\n - GlanceAPI\n - ManilaShare\n extraVolType: Ceph\n volumes:\n - name: ceph\n projected:\n sources:\n - secret:\n name: ceph-conf-files\n mounts:\n - name: ceph\n mountPath: \"/etc/ceph\"\n readOnly: true\n'\n", "delta": "0:00:00.307813", "end": "2026-02-27 17:48:53.008215", "msg": "", "rc": 0, "start": "2026-02-27 17:48:52.700402", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n extraMounts:\n - name: v1\n region: r1\n extraVol:\n - propagation:\n - CinderVolume\n - CinderBackup\n - GlanceAPI\n - ManilaShare\n extraVolType: Ceph\n volumes:\n - name: ceph\n projected:\n sources:\n - secret:\n name: ceph-conf-files\n mounts:\n - name: ceph\n mountPath: \"/etc/ceph\"\n readOnly: true\n'", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " extraMounts:", " - name: v1", " region: r1", " extraVol:", " - propagation:", " - CinderVolume", " - CinderBackup", " - GlanceAPI", " - ManilaShare", " extraVolType: Ceph", " volumes:", " - name: ceph", " projected:", " sources:", " - secret:", " name: ceph-conf-files", " mounts:", " - name: ceph", " mountPath: \"/etc/ceph\"", " readOnly: true", "'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [execute alternative tasks when source env is OSPdO] ********************** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [get_services_configuration : create bgpconfiguration] ******************** skipping: [localhost] => {"changed": false, "false_condition": "bgp", "skip_reason": "Conditional result was False"} TASK [get_services_configuration : create mariadb-client container] ************ changed: [localhost] => {"changed": true, "cmd": "\nset -euxo 
pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\n# delete existing mariadb-client pods\noc delete pod mariadb-client || true\noc run mariadb-client ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} --restart=Never -- /usr/bin/sleep infinity\n", "delta": "0:00:00.325025", "end": "2026-02-27 17:48:53.691023", "msg": "", "rc": 0, "start": "2026-02-27 17:48:53.365998", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ grep ' MysqlRootPassword:'\n++ cat /home/zuul/overcloud-passwords.yaml\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ oc delete pod mariadb-client\nError from server (NotFound): pods \"mariadb-client\" not found\n+ true\n+ oc run mariadb-client --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified --restart=Never -- /usr/bin/sleep infinity\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ 
CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ grep ' MysqlRootPassword:'", "++ cat /home/zuul/overcloud-passwords.yaml", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ oc delete pod mariadb-client", "Error from server (NotFound): pods \"mariadb-client\" not found", "+ true", "+ oc run mariadb-client --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified --restart=Never -- /usr/bin/sleep infinity", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")"], "stdout": "pod/mariadb-client created", "stdout_lines": ["pod/mariadb-client created"]} TASK [get_services_configuration : wait until SOURCE_MARIADB_IP is reachable] *** FAILED - RETRYING: [localhost]: wait until SOURCE_MARIADB_IP is reachable (60 retries left). 
ok: [localhost] => {"attempts": 2, "changed": false, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\noc rsh mariadb-client mysql -rsh ${SOURCE_MARIADB_IP[default]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[default]} -e 'select 1;'\n", "delta": "0:00:00.272201", "end": "2026-02-27 17:48:57.765267", "msg": "", "rc": 0, "start": "2026-02-27 17:48:57.493066", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'select 1;'", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat 
/home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'select 1;'"], "stdout": "1", "stdout_lines": ["1"]} TASK [get_services_configuration : test connection to the original DB] ********* changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nunset PULL_OPENSTACK_CONFIGURATION_DATABASES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\nfor CELL in $(echo $CELLS); do\n PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]=$(oc rsh mariadb-client mysql -rsh ${SOURCE_MARIADB_IP[$CELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;')\ndone\n", "delta": "0:00:00.291325", "end": "2026-02-27 17:48:58.366374", "msg": "", "rc": 0, "start": "2026-02-27 17:48:58.075049", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ unset PULL_OPENSTACK_CONFIGURATION_DATABASES\n+ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'SHOW databases;'\n+ 
PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]='aodh\nbarbican\ncinder\nglance\ngnocchi\nheat\ninformation_schema\nkeystone\nmanila\nmysql\nnova\nnova_api\nnova_cell0\noctavia\noctavia_persistence\novs_neutron\nperformance_schema\nplacement'", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ unset PULL_OPENSTACK_CONFIGURATION_DATABASES", "+ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'SHOW databases;'", "+ PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]='aodh", "barbican", "cinder", "glance", "gnocchi", "heat", "information_schema", "keystone", "manila", "mysql", "nova", "nova_api", "nova_cell0", "octavia", "octavia_persistence", "ovs_neutron", "performance_schema", "placement'"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : run mysqlcheck on the original DB to look for things that are not OK] *** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nunset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\nrun_mysqlcheck() {\n PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK=$(oc rsh mariadb-client mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} | grep -v OK)\n}\nfor CELL in $(echo $CELLS); 
do\n run_mysqlcheck $CELL\ndone\nif [ \"$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\" != \"\" ]; then\n # Try mysql_upgrade to fix mysqlcheck failure\n for CELL in $(echo $CELLS); do\n MYSQL_UPGRADE=$(oc rsh mariadb-client mysql_upgrade --skip-version-check -v -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p${SOURCE_DB_ROOT_PASSWORD[$CELL]})\n # rerun mysqlcheck to check if problem is resolved\n run_mysqlcheck\n done\nfi\necho \"$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\"\n", "delta": "0:00:01.172926", "end": "2026-02-27 17:48:59.841563", "failed_when_result": false, "msg": "non-zero return code", "rc": 1, "start": "2026-02-27 17:48:58.668637", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n+ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ run_mysqlcheck default\n++ oc rsh mariadb-client mysqlcheck --all-databases -h 172.17.0.100 -u root -pKeTtbe7od9\n++ grep -v OK\n+ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK=", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "+ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ run_mysqlcheck default", "++ oc rsh mariadb-client mysqlcheck --all-databases -h 
172.17.0.100 -u root -pKeTtbe7od9", "++ grep -v OK", "+ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK="], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : get Nova cells mappings from database] ****** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nexport PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=$(oc rsh mariadb-client mysql -rsh ${SOURCE_MARIADB_IP[default]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[default]} nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;')\n", "delta": "0:00:00.304388", "end": "2026-02-27 17:49:00.480365", "msg": "", "rc": 0, "start": "2026-02-27 17:49:00.175977", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'\n+ export 'PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n4f76d825-c003-4872-ad67-1b306e5e6cb5\tdefault\trabbit://{username}:{password}@standalone.internalapi.localdomain:5672/?ssl=0\tmysql+pymysql://{username}:{password}@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ 
PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n4f76d825-c003-4872-ad67-1b306e5e6cb5\tdefault\trabbit://{username}:{password}@standalone.internalapi.localdomain:5672/?ssl=0\tmysql+pymysql://{username}:{password}@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'", "+ export 'PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "4f76d825-c003-4872-ad67-1b306e5e6cb5\tdefault\trabbit://{username}:{password}@standalone.internalapi.localdomain:5672/?ssl=0\tmysql+pymysql://{username}:{password}@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "4f76d825-c003-4872-ad67-1b306e5e6cb5\tdefault\trabbit://{username}:{password}@standalone.internalapi.localdomain:5672/?ssl=0\tmysql+pymysql://{username}:{password}@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : get the host names of the registered Nova compute services] *** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' 
'\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nunset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\nfor CELL in $(echo $CELLS); do\n PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=$(oc rsh mariadb-client mysql -rsh ${SOURCE_MARIADB_IP[$CELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$CELL]} -e \"select host from nova.services where services.binary='nova-compute' and deleted=0;\" )\ndone\n", "delta": "0:00:00.307906", "end": "2026-02-27 17:49:01.102741", "msg": "", "rc": 0, "start": "2026-02-27 17:49:00.794835", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n+ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'\n+ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=standalone.localdomain", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", 
"+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "+ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'", "+ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=standalone.localdomain"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : get the list of mapped Nova cells] ********** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\nexport PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=$($CONTROLLER1_SSH sudo podman exec -it nova_api nova-manage cell_v2 list_cells)\n", "delta": "0:00:04.252039", "end": "2026-02-27 17:49:05.620176", "msg": "", "rc": 0, "start": "2026-02-27 17:49:01.368137", "stderr": "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_api nova-manage cell_v2 list_cells\n+ export 'PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+\r\n| Name | UUID | Transport URL | Database Connection | Disabled |\r\n+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+\r\n| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n| default | 4f76d825-c003-4872-ad67-1b306e5e6cb5 | rabbit://guest:****@standalone.internalapi.localdomain:5672/?ssl=0 | mysql+pymysql://nova:****@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+\r'\n+ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+\r\n| Name | UUID | Transport URL | Database Connection | Disabled 
|\r\n+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+\r\n| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n| default | 4f76d825-c003-4872-ad67-1b306e5e6cb5 | rabbit://guest:****@standalone.internalapi.localdomain:5672/?ssl=0 | mysql+pymysql://nova:****@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+\r'", "stderr_lines": ["+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_api nova-manage cell_v2 list_cells", "+ export 'PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+", "| Name | UUID | Transport URL | Database Connection | Disabled |", "+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+", "| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "| default | 4f76d825-c003-4872-ad67-1b306e5e6cb5 | rabbit://guest:****@standalone.internalapi.localdomain:5672/?ssl=0 | mysql+pymysql://nova:****@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+", "'", "+ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+", "| Name | UUID | Transport URL | Database Connection | Disabled |", "+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+", "| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@172.17.0.2/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "| default | 4f76d825-c003-4872-ad67-1b306e5e6cb5 | rabbit://guest:****@standalone.internalapi.localdomain:5672/?ssl=0 | 
mysql+pymysql://nova:****@172.17.0.2/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "+---------+--------------------------------------+--------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+----------+", "'"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : store exported variables for future use] **** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nfor CELL in $(echo $CELLS); do\n RCELL=$CELL\n [ \"$CELL\" = \"$DEFAULT_CELL_NAME\" ] && RCELL=default\n cat > ~/.source_cloud_exported_variables_$CELL << EOF\nunset PULL_OPENSTACK_CONFIGURATION_DATABASES\nunset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\nunset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\nPULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]=\"$(oc rsh mariadb-client \\\n mysql -rsh ${SOURCE_MARIADB_IP[$RCELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} -e 'SHOW databases;')\"\n\nPULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[$CELL]=\"$(oc rsh mariadb-client \\\n mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP[$RCELL]} -u root -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} | grep -v OK)\"\n\nPULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=\"$(oc rsh mariadb-client \\\n mysql -rsh ${SOURCE_MARIADB_IP[$RCELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} -e \\\n \"select host from nova.services where services.binary='nova-compute' and deleted=0;\" )\"\n\nif [ \"$RCELL\" = \"default\" ]; then\n PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=\"$(oc rsh mariadb-client \\\n mysql -rsh ${SOURCE_MARIADB_IP[$RCELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} nova_api -e \\\n 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;')\"\n PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=\"$($CONTROLLER1_SSH sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells)\"\nfi\nEOF\ndone\nchmod 0600 ~/.source_cloud_exported_variables*\n", "delta": "0:00:04.957988", "end": "2026-02-27 17:49:10.891248", "msg": "", "rc": 0, "start": "2026-02-27 17:49:05.933260", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ 
MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\\n[\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ RCELL=default\n+ '[' default = cell1 ']'\n+ cat\n++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'SHOW databases;'\n++ oc rsh mariadb-client mysqlcheck --all-databases -h 172.17.0.100 -u root -pKeTtbe7od9\n++ grep -v OK\n++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'\n++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'\n++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells\n+ chmod 0600 /home/zuul/.source_cloud_exported_variables_default", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": \\", "[\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=KeTtbe7od9", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ RCELL=default", "+ '[' default = cell1 ']'", "+ cat", "++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'SHOW databases;'", "++ oc rsh mariadb-client mysqlcheck --all-databases -h 172.17.0.100 -u root -pKeTtbe7od9", "++ grep -v OK", "++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot -pKeTtbe7od9 -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'", "++ oc rsh mariadb-client mysql -rsh 172.17.0.100 -uroot 
-pKeTtbe7od9 nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'", "++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells", "+ chmod 0600 /home/zuul/.source_cloud_exported_variables_default"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : delete mariadb-client pod] ****************** changed: [localhost] => {"changed": true, "cmd": "oc delete pod mariadb-client\n", "delta": "0:00:01.827633", "end": "2026-02-27 17:49:13.056633", "msg": "", "rc": 0, "start": "2026-02-27 17:49:11.229000", "stderr": "", "stderr_lines": [], "stdout": "pod \"mariadb-client\" deleted from openstack namespace", "stdout_lines": ["pod \"mariadb-client\" deleted from openstack namespace"]} TASK [stop_openstack_services : set shell vars for stopping openstack services] *** ok: [localhost] => {"ansible_facts": {"stop_openstack_services_shell_vars": "CONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n"}, "changed": false} TASK [stop_openstack_services : Remove colocation constraints between manila-share and ceph-nfs] *** skipping: [localhost] => {"changed": false, "false_condition": "manila_backend | default(\"\") == \"cephnfs\"", "skip_reason": "Conditional result was False"} TASK [stop_openstack_services : stop control plane services] ******************* fatal: [localhost]: FAILED! => {"changed": true, "cmd": "set -euxo pipefail\n\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\nServicesToStop=(\"tripleo_aodh_api.service\"\n \"tripleo_aodh_api_cron.service\"\n \"tripleo_aodh_evaluator.service\"\n \"tripleo_aodh_listener.service\"\n \"tripleo_aodh_notifier.service\"\n \"tripleo_ceilometer_agent_central.service\"\n \"tripleo_ceilometer_agent_notification.service\"\n \"tripleo_octavia_api.service\"\n \"tripleo_octavia_health_manager.service\"\n \"tripleo_octavia_rsyslog.service\"\n \"tripleo_octavia_driver_agent.service\"\n \"tripleo_octavia_housekeeping.service\"\n \"tripleo_octavia_worker.service\"\n \"tripleo_designate_api.service\"\n \"tripleo_designate_backend_bind9.service\"\n \"tripleo_designate_central.service\"\n \"tripleo_designate_mdns.service\"\n \"tripleo_designate_producer.service\"\n \"tripleo_designate_worker.service\"\n \"tripleo_unbound.service\"\n \"tripleo_horizon.service\"\n \"tripleo_keystone.service\"\n \"tripleo_barbican_api.service\"\n \"tripleo_barbican_worker.service\"\n \"tripleo_barbican_keystone_listener.service\"\n \"tripleo_cinder_api.service\"\n \"tripleo_cinder_api_cron.service\"\n \"tripleo_cinder_scheduler.service\"\n \"tripleo_cinder_volume.service\"\n \"tripleo_cinder_backup.service\"\n \"tripleo_collectd.service\"\n \"tripleo_glance_api.service\"\n \"tripleo_gnocchi_api.service\"\n \"tripleo_gnocchi_metricd.service\"\n \"tripleo_gnocchi_statsd.service\"\n \"tripleo_manila_api.service\"\n \"tripleo_manila_api_cron.service\"\n \"tripleo_manila_scheduler.service\"\n \"tripleo_neutron_api.service\"\n \"tripleo_placement_api.service\"\n \"tripleo_nova_api_cron.service\"\n \"tripleo_nova_api.service\"\n \"tripleo_nova_conductor.service\"\n \"tripleo_nova_metadata.service\"\n \"tripleo_nova_scheduler.service\"\n \"tripleo_nova_vnc_proxy.service\"\n \"tripleo_aodh_api.service\"\n \"tripleo_aodh_api_cron.service\"\n \"tripleo_aodh_evaluator.service\"\n \"tripleo_aodh_listener.service\"\n 
\"tripleo_aodh_notifier.service\"\n \"tripleo_ceilometer_agent_central.service\"\n \"tripleo_ceilometer_agent_compute.service\"\n \"tripleo_ceilometer_agent_ipmi.service\"\n \"tripleo_ceilometer_agent_notification.service\"\n \"tripleo_ovn_cluster_northd.service\"\n \"tripleo_ironic_neutron_agent.service\"\n \"tripleo_ironic_api.service\"\n \"tripleo_ironic_inspector.service\"\n \"tripleo_ironic_conductor.service\"\n \"tripleo_ironic_inspector_dnsmasq.service\"\n \"tripleo_ironic_pxe_http.service\"\n \"tripleo_ironic_pxe_tftp.service\")\n\nPacemakerResourcesToStop=(\"openstack-cinder-volume\"\n \"openstack-cinder-backup\"\n \"openstack-manila-share\")\n\necho \"Stopping systemd OpenStack services\"\nfor service in ${ServicesToStop[*]}; do\n for i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n echo \"Stopping the $service in controller $i\"\n if ${!SSH_CMD} sudo systemctl is-active $service; then\n ${!SSH_CMD} sudo systemctl stop $service\n fi\n fi\n done\ndone\n\necho \"Checking systemd OpenStack services\"\nfor service in ${ServicesToStop[*]}; do\n for i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n if ! ${!SSH_CMD} systemctl show $service | grep ActiveState=inactive >/dev/null; then\n echo \"ERROR: Service $service still running on controller $i\"\n else\n echo \"OK: Service $service is not running on controller $i\"\n fi\n fi\n done\ndone\n\necho \"Stopping pacemaker OpenStack services\"\nfor i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n echo \"Using controller $i to run pacemaker commands\"\n for resource in ${PacemakerResourcesToStop[*]}; do\n if ${!SSH_CMD} sudo pcs resource config $resource &>/dev/null; then\n echo \"Stopping $resource\"\n ${!SSH_CMD} sudo pcs resource disable $resource\n else\n echo \"Service $resource not present\"\n fi\n done\n break\n fi\ndone\n\necho \"Checking pacemaker OpenStack services\"\nfor i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n echo \"Using controller $i to run pacemaker commands\"\n for resource in ${PacemakerResourcesToStop[*]}; do\n if ${!SSH_CMD} sudo pcs resource config $resource &>/dev/null; then\n if ! 
${!SSH_CMD} sudo pcs resource status $resource | grep Started; then\n echo \"OK: Service $resource is stopped\"\n else\n echo \"ERROR: Service $resource is started\"\n fi\n fi\n done\n break\n fi\ndone\n", "delta": "0:03:18.469217", "end": "2026-02-27 17:52:31.884705", "msg": "non-zero return code", "rc": 255, "start": "2026-02-27 17:49:13.415488", "stderr": "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ ServicesToStop=(\"tripleo_aodh_api.service\" \"tripleo_aodh_api_cron.service\" \"tripleo_aodh_evaluator.service\" \"tripleo_aodh_listener.service\" \"tripleo_aodh_notifier.service\" \"tripleo_ceilometer_agent_central.service\" \"tripleo_ceilometer_agent_notification.service\" \"tripleo_octavia_api.service\" \"tripleo_octavia_health_manager.service\" \"tripleo_octavia_rsyslog.service\" \"tripleo_octavia_driver_agent.service\" \"tripleo_octavia_housekeeping.service\" \"tripleo_octavia_worker.service\" \"tripleo_designate_api.service\" \"tripleo_designate_backend_bind9.service\" \"tripleo_designate_central.service\" \"tripleo_designate_mdns.service\" \"tripleo_designate_producer.service\" \"tripleo_designate_worker.service\" \"tripleo_unbound.service\" \"tripleo_horizon.service\" \"tripleo_keystone.service\" \"tripleo_barbican_api.service\" \"tripleo_barbican_worker.service\" \"tripleo_barbican_keystone_listener.service\" \"tripleo_cinder_api.service\" \"tripleo_cinder_api_cron.service\" \"tripleo_cinder_scheduler.service\" \"tripleo_cinder_volume.service\" \"tripleo_cinder_backup.service\" \"tripleo_collectd.service\" \"tripleo_glance_api.service\" \"tripleo_gnocchi_api.service\" \"tripleo_gnocchi_metricd.service\" \"tripleo_gnocchi_statsd.service\" \"tripleo_manila_api.service\" \"tripleo_manila_api_cron.service\" \"tripleo_manila_scheduler.service\" \"tripleo_neutron_api.service\" \"tripleo_placement_api.service\" \"tripleo_nova_api_cron.service\" \"tripleo_nova_api.service\" \"tripleo_nova_conductor.service\" \"tripleo_nova_metadata.service\" \"tripleo_nova_scheduler.service\" \"tripleo_nova_vnc_proxy.service\" \"tripleo_aodh_api.service\" \"tripleo_aodh_api_cron.service\" \"tripleo_aodh_evaluator.service\" \"tripleo_aodh_listener.service\" \"tripleo_aodh_notifier.service\" \"tripleo_ceilometer_agent_central.service\" \"tripleo_ceilometer_agent_compute.service\" \"tripleo_ceilometer_agent_ipmi.service\" \"tripleo_ceilometer_agent_notification.service\" \"tripleo_ovn_cluster_northd.service\" \"tripleo_ironic_neutron_agent.service\" \"tripleo_ironic_api.service\" \"tripleo_ironic_inspector.service\" \"tripleo_ironic_conductor.service\" \"tripleo_ironic_inspector_dnsmasq.service\" \"tripleo_ironic_pxe_http.service\" \"tripleo_ironic_pxe_tftp.service\")\n+ PacemakerResourcesToStop=(\"openstack-cinder-volume\" \"openstack-cinder-backup\" \"openstack-manila-share\")\n+ echo 'Stopping systemd OpenStack services'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_api.service\n+ : sudo systemctl stop tripleo_aodh_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_api.service\n+ : sudo systemctl stop tripleo_aodh_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_api_cron.service\n+ : sudo systemctl stop tripleo_aodh_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_api_cron.service\n+ : sudo systemctl stop tripleo_aodh_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_evaluator.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_evaluator.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_evaluator.service\n+ : sudo systemctl stop tripleo_aodh_evaluator.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_evaluator.service\n+ : sudo systemctl stop tripleo_aodh_evaluator.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_listener.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_listener.service\n+ : sudo systemctl stop tripleo_aodh_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_listener.service\n+ : sudo systemctl stop tripleo_aodh_listener.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_notifier.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_notifier.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_notifier.service\n+ : sudo systemctl stop tripleo_aodh_notifier.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_notifier.service\n+ : sudo systemctl stop tripleo_aodh_notifier.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_octavia_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_api.service\n+ : sudo systemctl stop tripleo_octavia_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_api.service\n+ : sudo systemctl stop tripleo_octavia_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_health_manager.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_health_manager.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_health_manager.service\n+ : sudo systemctl stop tripleo_octavia_health_manager.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_health_manager.service\n+ : sudo systemctl stop tripleo_octavia_health_manager.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_rsyslog.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_rsyslog.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_rsyslog.service\n+ : sudo systemctl stop tripleo_octavia_rsyslog.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_rsyslog.service\n+ : sudo systemctl stop tripleo_octavia_rsyslog.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_driver_agent.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_driver_agent.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_driver_agent.service\n+ : sudo systemctl stop tripleo_octavia_driver_agent.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_driver_agent.service\n+ : sudo systemctl stop tripleo_octavia_driver_agent.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_housekeeping.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_housekeeping.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_housekeeping.service\n+ : sudo systemctl stop tripleo_octavia_housekeeping.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_housekeeping.service\n+ : sudo systemctl stop tripleo_octavia_housekeeping.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_worker.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_worker.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_worker.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_worker.service\n+ : sudo systemctl stop tripleo_octavia_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_worker.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_worker.service\n+ : sudo systemctl stop tripleo_octavia_worker.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_designate_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_designate_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_designate_api.service\n+ : sudo systemctl stop tripleo_designate_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_designate_api.service\n+ : sudo systemctl stop tripleo_designate_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_designate_backend_bind9.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_designate_backend_bind9.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_backend_bind9.service in controller 2'\n+ : sudo systemctl is-active tripleo_designate_backend_bind9.service\n+ : sudo systemctl stop tripleo_designate_backend_bind9.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_designate_backend_bind9.service in controller 3'\n+ : sudo systemctl is-active tripleo_designate_backend_bind9.service\n+ : sudo systemctl stop tripleo_designate_backend_bind9.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_designate_central.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_designate_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_central.service in controller 2'\n+ : sudo systemctl is-active tripleo_designate_central.service\n+ : sudo systemctl stop tripleo_designate_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_central.service in controller 3'\n+ : sudo systemctl is-active tripleo_designate_central.service\n+ : sudo systemctl stop tripleo_designate_central.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_designate_mdns.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_designate_mdns.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_mdns.service in controller 2'\n+ : sudo systemctl is-active tripleo_designate_mdns.service\n+ : sudo systemctl stop tripleo_designate_mdns.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_mdns.service in controller 3'\n+ : sudo systemctl is-active tripleo_designate_mdns.service\n+ : sudo systemctl stop tripleo_designate_mdns.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_designate_producer.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_designate_producer.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_producer.service in controller 2'\n+ : sudo systemctl is-active tripleo_designate_producer.service\n+ : sudo systemctl stop tripleo_designate_producer.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_producer.service in controller 3'\n+ : sudo systemctl is-active tripleo_designate_producer.service\n+ : sudo systemctl stop tripleo_designate_producer.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_designate_worker.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_designate_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_designate_worker.service in controller 2'\n+ : sudo systemctl is-active tripleo_designate_worker.service\n+ : sudo systemctl stop tripleo_designate_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_designate_worker.service in controller 3'\n+ : sudo systemctl is-active tripleo_designate_worker.service\n+ : sudo systemctl stop tripleo_designate_worker.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_unbound.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_unbound.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_unbound.service in controller 2'\n+ : sudo systemctl is-active tripleo_unbound.service\n+ : sudo systemctl stop tripleo_unbound.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_unbound.service in controller 3'\n+ : sudo systemctl is-active tripleo_unbound.service\n+ : sudo systemctl stop tripleo_unbound.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_horizon.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_horizon.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_horizon.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_horizon.service in controller 2'\n+ : sudo systemctl is-active tripleo_horizon.service\n+ : sudo systemctl stop tripleo_horizon.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_horizon.service in controller 3'\n+ : sudo systemctl is-active tripleo_horizon.service\n+ : sudo systemctl stop tripleo_horizon.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_keystone.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_keystone.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_keystone.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_keystone.service in controller 2'\n+ : sudo systemctl is-active tripleo_keystone.service\n+ : sudo systemctl stop tripleo_keystone.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_keystone.service in controller 3'\n+ : sudo systemctl is-active tripleo_keystone.service\n+ : sudo systemctl stop tripleo_keystone.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_barbican_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_barbican_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_barbican_api.service\n+ : sudo systemctl stop tripleo_barbican_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_barbican_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_barbican_api.service\n+ : sudo systemctl stop tripleo_barbican_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_barbican_worker.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_worker.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_barbican_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_worker.service in controller 2'\n+ : sudo systemctl is-active tripleo_barbican_worker.service\n+ : sudo systemctl stop tripleo_barbican_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_worker.service in controller 3'\n+ : sudo systemctl is-active tripleo_barbican_worker.service\n+ : sudo systemctl stop tripleo_barbican_worker.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_keystone_listener.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_barbican_keystone_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 2'\n+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service\n+ : sudo systemctl stop tripleo_barbican_keystone_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 3'\n+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service\n+ : sudo systemctl stop tripleo_barbican_keystone_listener.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_api.service\n+ : sudo systemctl stop tripleo_cinder_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_api.service\n+ : sudo systemctl stop tripleo_cinder_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_api_cron.service\n+ : sudo systemctl stop tripleo_cinder_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_api_cron.service\n+ : sudo systemctl stop tripleo_cinder_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_scheduler.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_scheduler.service\n+ : sudo systemctl stop tripleo_cinder_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_scheduler.service\n+ : sudo systemctl stop tripleo_cinder_scheduler.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_volume.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_volume.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_volume.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_volume.service\n+ : sudo systemctl stop tripleo_cinder_volume.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_volume.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_volume.service\n+ : sudo systemctl stop tripleo_cinder_volume.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_backup.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_backup.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_backup.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_backup.service\n+ : sudo systemctl stop tripleo_cinder_backup.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_cinder_backup.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_backup.service\n+ : sudo systemctl stop tripleo_cinder_backup.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_collectd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_collectd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_collectd.service in controller 2'\n+ : sudo systemctl is-active tripleo_collectd.service\n+ : sudo systemctl stop tripleo_collectd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_collectd.service in controller 3'\n+ : sudo systemctl is-active tripleo_collectd.service\n+ : sudo systemctl stop tripleo_collectd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_glance_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_glance_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_glance_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_glance_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_glance_api.service\n+ : sudo systemctl stop tripleo_glance_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_glance_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_glance_api.service\n+ : sudo systemctl stop tripleo_glance_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_gnocchi_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_gnocchi_api.service\n+ : sudo systemctl stop tripleo_gnocchi_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_gnocchi_api.service\n+ : sudo systemctl stop tripleo_gnocchi_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_metricd.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_metricd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 2'\n+ : sudo systemctl is-active tripleo_gnocchi_metricd.service\n+ : sudo systemctl stop tripleo_gnocchi_metricd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 3'\n+ : sudo systemctl is-active tripleo_gnocchi_metricd.service\n+ : sudo systemctl stop tripleo_gnocchi_metricd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_statsd.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_statsd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 2'\n+ : sudo systemctl is-active tripleo_gnocchi_statsd.service\n+ : sudo systemctl stop tripleo_gnocchi_statsd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 3'\n+ : sudo systemctl is-active tripleo_gnocchi_statsd.service\n+ : sudo systemctl stop tripleo_gnocchi_statsd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_manila_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_manila_api.service\n+ : sudo systemctl stop tripleo_manila_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_manila_api.service\n+ : sudo systemctl stop tripleo_manila_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_manila_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_manila_api_cron.service\n+ : sudo systemctl stop tripleo_manila_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_manila_api_cron.service\n+ : sudo systemctl stop tripleo_manila_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_manila_scheduler.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_scheduler.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_scheduler.service in controller 2'\n+ : sudo systemctl is-active tripleo_manila_scheduler.service\n+ : sudo systemctl stop tripleo_manila_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_scheduler.service in controller 3'\n+ : sudo systemctl is-active tripleo_manila_scheduler.service\n+ : sudo systemctl stop tripleo_manila_scheduler.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_neutron_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_neutron_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_neutron_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_neutron_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_neutron_api.service\n+ : sudo systemctl stop tripleo_neutron_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_neutron_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_neutron_api.service\n+ : sudo systemctl stop tripleo_neutron_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_placement_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_placement_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_placement_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_placement_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_placement_api.service\n+ : sudo systemctl stop tripleo_placement_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_placement_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_placement_api.service\n+ : sudo systemctl stop tripleo_placement_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_api_cron.service\n+ : sudo systemctl stop tripleo_nova_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_nova_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_api_cron.service\n+ : sudo systemctl stop tripleo_nova_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_api.service\n+ : sudo systemctl stop tripleo_nova_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_api.service\n+ : sudo systemctl stop tripleo_nova_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_conductor.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_conductor.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_conductor.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_conductor.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_conductor.service\n+ : sudo systemctl stop tripleo_nova_conductor.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_conductor.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_conductor.service\n+ : sudo systemctl stop tripleo_nova_conductor.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_metadata.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_metadata.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_metadata.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_metadata.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_metadata.service\n+ : sudo systemctl stop tripleo_nova_metadata.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_metadata.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_metadata.service\n+ : sudo systemctl stop tripleo_nova_metadata.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_scheduler.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_scheduler.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_scheduler.service\nkex_exchange_identification: read: Connection reset by peer\r\nConnection reset by 192.168.122.100 port 22"}
-z : ']'", "+ echo 'Stopping the tripleo_keystone.service in controller 2'", "+ : sudo systemctl is-active tripleo_keystone.service", "+ : sudo systemctl stop tripleo_keystone.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_keystone.service in controller 3'", "+ : sudo systemctl is-active tripleo_keystone.service", "+ : sudo systemctl stop tripleo_keystone.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_barbican_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_barbican_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_barbican_api.service", "+ : sudo systemctl stop tripleo_barbican_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_barbican_api.service", "+ : sudo systemctl stop tripleo_barbican_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_barbican_worker.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_worker.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_barbican_worker.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_worker.service in controller 2'", "+ : sudo systemctl is-active tripleo_barbican_worker.service", "+ : sudo systemctl stop tripleo_barbican_worker.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_worker.service in controller 3'", "+ : sudo systemctl is-active tripleo_barbican_worker.service", "+ : sudo systemctl stop tripleo_barbican_worker.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_keystone_listener.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_barbican_keystone_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 2'", "+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service", "+ : sudo systemctl stop tripleo_barbican_keystone_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 3'", "+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service", "+ : sudo systemctl stop tripleo_barbican_keystone_listener.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_api.service", "+ : sudo systemctl stop tripleo_cinder_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_api.service", "+ : sudo systemctl stop tripleo_cinder_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_api_cron.service", "+ : sudo systemctl stop tripleo_cinder_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_api_cron.service", "+ : sudo systemctl stop tripleo_cinder_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_scheduler.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_scheduler.service", "+ : sudo systemctl stop tripleo_cinder_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_scheduler.service", "+ : sudo systemctl stop tripleo_cinder_scheduler.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_volume.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_volume.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_volume.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_volume.service", "+ : sudo systemctl stop tripleo_cinder_volume.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_volume.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_volume.service", "+ : sudo systemctl stop tripleo_cinder_volume.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_backup.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_backup.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_backup.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_backup.service", "+ : sudo systemctl stop tripleo_cinder_backup.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_backup.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_backup.service", "+ : sudo systemctl stop tripleo_cinder_backup.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_collectd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_collectd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_collectd.service in controller 2'", "+ : sudo systemctl is-active tripleo_collectd.service", "+ : sudo systemctl stop tripleo_collectd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_collectd.service in controller 3'", "+ : sudo systemctl is-active tripleo_collectd.service", "+ : sudo systemctl stop tripleo_collectd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_glance_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_glance_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_glance_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_glance_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_glance_api.service", "+ : sudo systemctl stop tripleo_glance_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_glance_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_glance_api.service", "+ : sudo systemctl stop tripleo_glance_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_gnocchi_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_gnocchi_api.service", "+ : sudo systemctl stop tripleo_gnocchi_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_gnocchi_api.service", "+ : sudo systemctl stop tripleo_gnocchi_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_metricd.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_metricd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 2'", "+ : sudo systemctl is-active tripleo_gnocchi_metricd.service", "+ : sudo systemctl stop tripleo_gnocchi_metricd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 3'", "+ : sudo systemctl is-active tripleo_gnocchi_metricd.service", "+ : sudo systemctl stop tripleo_gnocchi_metricd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_statsd.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_statsd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 2'", "+ : sudo systemctl is-active tripleo_gnocchi_statsd.service", "+ : sudo systemctl stop tripleo_gnocchi_statsd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 3'", "+ : sudo systemctl is-active tripleo_gnocchi_statsd.service", "+ : sudo systemctl stop tripleo_gnocchi_statsd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_manila_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_manila_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_manila_api.service", "+ : sudo systemctl stop tripleo_manila_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_manila_api.service", "+ : sudo systemctl stop tripleo_manila_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_manila_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_manila_api_cron.service", "+ : sudo systemctl stop tripleo_manila_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_manila_api_cron.service", "+ : sudo systemctl stop tripleo_manila_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_manila_scheduler.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_scheduler.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_scheduler.service in controller 2'", "+ : sudo systemctl is-active tripleo_manila_scheduler.service", "+ : sudo systemctl stop tripleo_manila_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_scheduler.service in controller 3'", "+ : sudo systemctl is-active tripleo_manila_scheduler.service", "+ : sudo systemctl stop tripleo_manila_scheduler.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_neutron_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_neutron_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_neutron_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_neutron_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_neutron_api.service", "+ : sudo systemctl stop tripleo_neutron_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_neutron_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_neutron_api.service", "+ : sudo systemctl stop tripleo_neutron_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_placement_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_placement_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_placement_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_placement_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_placement_api.service", "+ : sudo systemctl stop tripleo_placement_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_placement_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_placement_api.service", "+ : sudo systemctl stop tripleo_placement_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_api_cron.service", "+ : sudo systemctl stop tripleo_nova_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_api_cron.service", "+ : sudo systemctl stop tripleo_nova_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_api.service", "+ : sudo systemctl stop tripleo_nova_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_api.service", "+ : sudo systemctl stop tripleo_nova_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_conductor.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_conductor.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_conductor.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_nova_conductor.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_conductor.service", "+ : sudo systemctl stop tripleo_nova_conductor.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_conductor.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_conductor.service", "+ : sudo systemctl stop tripleo_nova_conductor.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_metadata.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_metadata.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_metadata.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_metadata.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_metadata.service", "+ : sudo systemctl stop tripleo_nova_metadata.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_metadata.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_metadata.service", "+ : sudo systemctl stop tripleo_nova_metadata.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_scheduler.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_scheduler.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_scheduler.service", "kex_exchange_identification: read: Connection reset by peer", "Connection reset by 192.168.122.100 port 22"], "stdout": "Stopping systemd OpenStack services\nStopping the tripleo_aodh_api.service in controller 1\nactive\nStopping the tripleo_aodh_api.service in controller 2\nStopping the tripleo_aodh_api.service in controller 3\nStopping the tripleo_aodh_api_cron.service in controller 1\nactive\nStopping the tripleo_aodh_api_cron.service in controller 2\nStopping the tripleo_aodh_api_cron.service in controller 3\nStopping the tripleo_aodh_evaluator.service in controller 1\nactive\nStopping the tripleo_aodh_evaluator.service in controller 2\nStopping the tripleo_aodh_evaluator.service in controller 3\nStopping the tripleo_aodh_listener.service in controller 1\nactive\nStopping the tripleo_aodh_listener.service in controller 2\nStopping the tripleo_aodh_listener.service in controller 3\nStopping the tripleo_aodh_notifier.service in controller 1\nactive\nStopping the tripleo_aodh_notifier.service in controller 2\nStopping the tripleo_aodh_notifier.service in controller 3\nStopping the tripleo_ceilometer_agent_central.service in controller 1\nactive\nStopping the tripleo_ceilometer_agent_central.service in controller 2\nStopping the tripleo_ceilometer_agent_central.service in controller 3\nStopping the tripleo_ceilometer_agent_notification.service in controller 1\nactive\nStopping the tripleo_ceilometer_agent_notification.service in controller 2\nStopping the tripleo_ceilometer_agent_notification.service in controller 3\nStopping the tripleo_octavia_api.service in controller 1\nactive\nStopping the tripleo_octavia_api.service in 
controller 2\nStopping the tripleo_octavia_api.service in controller 3\nStopping the tripleo_octavia_health_manager.service in controller 1\nactive\nStopping the tripleo_octavia_health_manager.service in controller 2\nStopping the tripleo_octavia_health_manager.service in controller 3\nStopping the tripleo_octavia_rsyslog.service in controller 1\nactive\nStopping the tripleo_octavia_rsyslog.service in controller 2\nStopping the tripleo_octavia_rsyslog.service in controller 3\nStopping the tripleo_octavia_driver_agent.service in controller 1\nactive\nStopping the tripleo_octavia_driver_agent.service in controller 2\nStopping the tripleo_octavia_driver_agent.service in controller 3\nStopping the tripleo_octavia_housekeeping.service in controller 1\nactive\nStopping the tripleo_octavia_housekeeping.service in controller 2\nStopping the tripleo_octavia_housekeeping.service in controller 3\nStopping the tripleo_octavia_worker.service in controller 1\nactive\nStopping the tripleo_octavia_worker.service in controller 2\nStopping the tripleo_octavia_worker.service in controller 3\nStopping the tripleo_designate_api.service in controller 1\ninactive\nStopping the tripleo_designate_api.service in controller 2\nStopping the tripleo_designate_api.service in controller 3\nStopping the tripleo_designate_backend_bind9.service in controller 1\ninactive\nStopping the tripleo_designate_backend_bind9.service in controller 2\nStopping the tripleo_designate_backend_bind9.service in controller 3\nStopping the tripleo_designate_central.service in controller 1\ninactive\nStopping the tripleo_designate_central.service in controller 2\nStopping the tripleo_designate_central.service in controller 3\nStopping the tripleo_designate_mdns.service in controller 1\ninactive\nStopping the tripleo_designate_mdns.service in controller 2\nStopping the tripleo_designate_mdns.service in controller 3\nStopping the tripleo_designate_producer.service in controller 1\ninactive\nStopping the tripleo_designate_producer.service in controller 2\nStopping the tripleo_designate_producer.service in controller 3\nStopping the tripleo_designate_worker.service in controller 1\ninactive\nStopping the tripleo_designate_worker.service in controller 2\nStopping the tripleo_designate_worker.service in controller 3\nStopping the tripleo_unbound.service in controller 1\ninactive\nStopping the tripleo_unbound.service in controller 2\nStopping the tripleo_unbound.service in controller 3\nStopping the tripleo_horizon.service in controller 1\nactive\nStopping the tripleo_horizon.service in controller 2\nStopping the tripleo_horizon.service in controller 3\nStopping the tripleo_keystone.service in controller 1\nactive\nStopping the tripleo_keystone.service in controller 2\nStopping the tripleo_keystone.service in controller 3\nStopping the tripleo_barbican_api.service in controller 1\nactive\nStopping the tripleo_barbican_api.service in controller 2\nStopping the tripleo_barbican_api.service in controller 3\nStopping the tripleo_barbican_worker.service in controller 1\nactive\nStopping the tripleo_barbican_worker.service in controller 2\nStopping the tripleo_barbican_worker.service in controller 3\nStopping the tripleo_barbican_keystone_listener.service in controller 1\nactive\nStopping the tripleo_barbican_keystone_listener.service in controller 2\nStopping the tripleo_barbican_keystone_listener.service in controller 3\nStopping the tripleo_cinder_api.service in controller 1\nactive\nStopping the tripleo_cinder_api.service in controller 2\nStopping the 
tripleo_cinder_api.service in controller 3\nStopping the tripleo_cinder_api_cron.service in controller 1\nactive\nStopping the tripleo_cinder_api_cron.service in controller 2\nStopping the tripleo_cinder_api_cron.service in controller 3\nStopping the tripleo_cinder_scheduler.service in controller 1\nactive\nStopping the tripleo_cinder_scheduler.service in controller 2\nStopping the tripleo_cinder_scheduler.service in controller 3\nStopping the tripleo_cinder_volume.service in controller 1\ninactive\nStopping the tripleo_cinder_volume.service in controller 2\nStopping the tripleo_cinder_volume.service in controller 3\nStopping the tripleo_cinder_backup.service in controller 1\ninactive\nStopping the tripleo_cinder_backup.service in controller 2\nStopping the tripleo_cinder_backup.service in controller 3\nStopping the tripleo_collectd.service in controller 1\ninactive\nStopping the tripleo_collectd.service in controller 2\nStopping the tripleo_collectd.service in controller 3\nStopping the tripleo_glance_api.service in controller 1\nactive\nStopping the tripleo_glance_api.service in controller 2\nStopping the tripleo_glance_api.service in controller 3\nStopping the tripleo_gnocchi_api.service in controller 1\nactive\nStopping the tripleo_gnocchi_api.service in controller 2\nStopping the tripleo_gnocchi_api.service in controller 3\nStopping the tripleo_gnocchi_metricd.service in controller 1\nactive\nStopping the tripleo_gnocchi_metricd.service in controller 2\nStopping the tripleo_gnocchi_metricd.service in controller 3\nStopping the tripleo_gnocchi_statsd.service in controller 1\nactive\nStopping the tripleo_gnocchi_statsd.service in controller 2\nStopping the tripleo_gnocchi_statsd.service in controller 3\nStopping the tripleo_manila_api.service in controller 1\nactive\nStopping the tripleo_manila_api.service in controller 2\nStopping the tripleo_manila_api.service in controller 3\nStopping the tripleo_manila_api_cron.service in controller 1\nactive\nStopping the tripleo_manila_api_cron.service in controller 2\nStopping the tripleo_manila_api_cron.service in controller 3\nStopping the tripleo_manila_scheduler.service in controller 1\nactive\nStopping the tripleo_manila_scheduler.service in controller 2\nStopping the tripleo_manila_scheduler.service in controller 3\nStopping the tripleo_neutron_api.service in controller 1\nactive\nStopping the tripleo_neutron_api.service in controller 2\nStopping the tripleo_neutron_api.service in controller 3\nStopping the tripleo_placement_api.service in controller 1\nactive\nStopping the tripleo_placement_api.service in controller 2\nStopping the tripleo_placement_api.service in controller 3\nStopping the tripleo_nova_api_cron.service in controller 1\nactive\nStopping the tripleo_nova_api_cron.service in controller 2\nStopping the tripleo_nova_api_cron.service in controller 3\nStopping the tripleo_nova_api.service in controller 1\nactive\nStopping the tripleo_nova_api.service in controller 2\nStopping the tripleo_nova_api.service in controller 3\nStopping the tripleo_nova_conductor.service in controller 1\nactive\nStopping the tripleo_nova_conductor.service in controller 2\nStopping the tripleo_nova_conductor.service in controller 3\nStopping the tripleo_nova_metadata.service in controller 1\nactive\nStopping the tripleo_nova_metadata.service in controller 2\nStopping the tripleo_nova_metadata.service in controller 3\nStopping the tripleo_nova_scheduler.service in controller 1\nactive", "stdout_lines": ["Stopping systemd OpenStack services", "Stopping 
the tripleo_aodh_api.service in controller 1", "active", "Stopping the tripleo_aodh_api.service in controller 2", "Stopping the tripleo_aodh_api.service in controller 3", "Stopping the tripleo_aodh_api_cron.service in controller 1", "active", "Stopping the tripleo_aodh_api_cron.service in controller 2", "Stopping the tripleo_aodh_api_cron.service in controller 3", "Stopping the tripleo_aodh_evaluator.service in controller 1", "active", "Stopping the tripleo_aodh_evaluator.service in controller 2", "Stopping the tripleo_aodh_evaluator.service in controller 3", "Stopping the tripleo_aodh_listener.service in controller 1", "active", "Stopping the tripleo_aodh_listener.service in controller 2", "Stopping the tripleo_aodh_listener.service in controller 3", "Stopping the tripleo_aodh_notifier.service in controller 1", "active", "Stopping the tripleo_aodh_notifier.service in controller 2", "Stopping the tripleo_aodh_notifier.service in controller 3", "Stopping the tripleo_ceilometer_agent_central.service in controller 1", "active", "Stopping the tripleo_ceilometer_agent_central.service in controller 2", "Stopping the tripleo_ceilometer_agent_central.service in controller 3", "Stopping the tripleo_ceilometer_agent_notification.service in controller 1", "active", "Stopping the tripleo_ceilometer_agent_notification.service in controller 2", "Stopping the tripleo_ceilometer_agent_notification.service in controller 3", "Stopping the tripleo_octavia_api.service in controller 1", "active", "Stopping the tripleo_octavia_api.service in controller 2", "Stopping the tripleo_octavia_api.service in controller 3", "Stopping the tripleo_octavia_health_manager.service in controller 1", "active", "Stopping the tripleo_octavia_health_manager.service in controller 2", "Stopping the tripleo_octavia_health_manager.service in controller 3", "Stopping the tripleo_octavia_rsyslog.service in controller 1", "active", "Stopping the tripleo_octavia_rsyslog.service in controller 2", "Stopping the tripleo_octavia_rsyslog.service in controller 3", "Stopping the tripleo_octavia_driver_agent.service in controller 1", "active", "Stopping the tripleo_octavia_driver_agent.service in controller 2", "Stopping the tripleo_octavia_driver_agent.service in controller 3", "Stopping the tripleo_octavia_housekeeping.service in controller 1", "active", "Stopping the tripleo_octavia_housekeeping.service in controller 2", "Stopping the tripleo_octavia_housekeeping.service in controller 3", "Stopping the tripleo_octavia_worker.service in controller 1", "active", "Stopping the tripleo_octavia_worker.service in controller 2", "Stopping the tripleo_octavia_worker.service in controller 3", "Stopping the tripleo_designate_api.service in controller 1", "inactive", "Stopping the tripleo_designate_api.service in controller 2", "Stopping the tripleo_designate_api.service in controller 3", "Stopping the tripleo_designate_backend_bind9.service in controller 1", "inactive", "Stopping the tripleo_designate_backend_bind9.service in controller 2", "Stopping the tripleo_designate_backend_bind9.service in controller 3", "Stopping the tripleo_designate_central.service in controller 1", "inactive", "Stopping the tripleo_designate_central.service in controller 2", "Stopping the tripleo_designate_central.service in controller 3", "Stopping the tripleo_designate_mdns.service in controller 1", "inactive", "Stopping the tripleo_designate_mdns.service in controller 2", "Stopping the tripleo_designate_mdns.service in controller 3", "Stopping the 
tripleo_designate_producer.service in controller 1", "inactive", "Stopping the tripleo_designate_producer.service in controller 2", "Stopping the tripleo_designate_producer.service in controller 3", "Stopping the tripleo_designate_worker.service in controller 1", "inactive", "Stopping the tripleo_designate_worker.service in controller 2", "Stopping the tripleo_designate_worker.service in controller 3", "Stopping the tripleo_unbound.service in controller 1", "inactive", "Stopping the tripleo_unbound.service in controller 2", "Stopping the tripleo_unbound.service in controller 3", "Stopping the tripleo_horizon.service in controller 1", "active", "Stopping the tripleo_horizon.service in controller 2", "Stopping the tripleo_horizon.service in controller 3", "Stopping the tripleo_keystone.service in controller 1", "active", "Stopping the tripleo_keystone.service in controller 2", "Stopping the tripleo_keystone.service in controller 3", "Stopping the tripleo_barbican_api.service in controller 1", "active", "Stopping the tripleo_barbican_api.service in controller 2", "Stopping the tripleo_barbican_api.service in controller 3", "Stopping the tripleo_barbican_worker.service in controller 1", "active", "Stopping the tripleo_barbican_worker.service in controller 2", "Stopping the tripleo_barbican_worker.service in controller 3", "Stopping the tripleo_barbican_keystone_listener.service in controller 1", "active", "Stopping the tripleo_barbican_keystone_listener.service in controller 2", "Stopping the tripleo_barbican_keystone_listener.service in controller 3", "Stopping the tripleo_cinder_api.service in controller 1", "active", "Stopping the tripleo_cinder_api.service in controller 2", "Stopping the tripleo_cinder_api.service in controller 3", "Stopping the tripleo_cinder_api_cron.service in controller 1", "active", "Stopping the tripleo_cinder_api_cron.service in controller 2", "Stopping the tripleo_cinder_api_cron.service in controller 3", "Stopping the tripleo_cinder_scheduler.service in controller 1", "active", "Stopping the tripleo_cinder_scheduler.service in controller 2", "Stopping the tripleo_cinder_scheduler.service in controller 3", "Stopping the tripleo_cinder_volume.service in controller 1", "inactive", "Stopping the tripleo_cinder_volume.service in controller 2", "Stopping the tripleo_cinder_volume.service in controller 3", "Stopping the tripleo_cinder_backup.service in controller 1", "inactive", "Stopping the tripleo_cinder_backup.service in controller 2", "Stopping the tripleo_cinder_backup.service in controller 3", "Stopping the tripleo_collectd.service in controller 1", "inactive", "Stopping the tripleo_collectd.service in controller 2", "Stopping the tripleo_collectd.service in controller 3", "Stopping the tripleo_glance_api.service in controller 1", "active", "Stopping the tripleo_glance_api.service in controller 2", "Stopping the tripleo_glance_api.service in controller 3", "Stopping the tripleo_gnocchi_api.service in controller 1", "active", "Stopping the tripleo_gnocchi_api.service in controller 2", "Stopping the tripleo_gnocchi_api.service in controller 3", "Stopping the tripleo_gnocchi_metricd.service in controller 1", "active", "Stopping the tripleo_gnocchi_metricd.service in controller 2", "Stopping the tripleo_gnocchi_metricd.service in controller 3", "Stopping the tripleo_gnocchi_statsd.service in controller 1", "active", "Stopping the tripleo_gnocchi_statsd.service in controller 2", "Stopping the tripleo_gnocchi_statsd.service in controller 3", "Stopping the 
tripleo_manila_api.service in controller 1", "active", "Stopping the tripleo_manila_api.service in controller 2", "Stopping the tripleo_manila_api.service in controller 3", "Stopping the tripleo_manila_api_cron.service in controller 1", "active", "Stopping the tripleo_manila_api_cron.service in controller 2", "Stopping the tripleo_manila_api_cron.service in controller 3", "Stopping the tripleo_manila_scheduler.service in controller 1", "active", "Stopping the tripleo_manila_scheduler.service in controller 2", "Stopping the tripleo_manila_scheduler.service in controller 3", "Stopping the tripleo_neutron_api.service in controller 1", "active", "Stopping the tripleo_neutron_api.service in controller 2", "Stopping the tripleo_neutron_api.service in controller 3", "Stopping the tripleo_placement_api.service in controller 1", "active", "Stopping the tripleo_placement_api.service in controller 2", "Stopping the tripleo_placement_api.service in controller 3", "Stopping the tripleo_nova_api_cron.service in controller 1", "active", "Stopping the tripleo_nova_api_cron.service in controller 2", "Stopping the tripleo_nova_api_cron.service in controller 3", "Stopping the tripleo_nova_api.service in controller 1", "active", "Stopping the tripleo_nova_api.service in controller 2", "Stopping the tripleo_nova_api.service in controller 3", "Stopping the tripleo_nova_conductor.service in controller 1", "active", "Stopping the tripleo_nova_conductor.service in controller 2", "Stopping the tripleo_nova_conductor.service in controller 3", "Stopping the tripleo_nova_metadata.service in controller 1", "active", "Stopping the tripleo_nova_metadata.service in controller 2", "Stopping the tripleo_nova_metadata.service in controller 3", "Stopping the tripleo_nova_scheduler.service in controller 1", "active"]}

PLAY RECAP *********************************************************************
localhost                  : ok=35   changed=30   unreachable=0    failed=1    skipped=36   rescued=0    ignored=0
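
Note on the failed task above: it is the adoption step that stops the source cloud's systemd-managed OpenStack services. The `set -x` trace in stderr_lines lets the loop be reconstructed roughly as below. This is a sketch inferred from the trace, not the playbook source: the CONTROLLER*_SSH values are the ones echoed in the trace (controllers 2 and 3 are stubbed with the ":" no-op builtin in this single-controller job), only a representative subset of the ServicesToStop array is listed, and the `set -euxo pipefail` header is assumed from the other shell tasks in this playbook.

#!/bin/bash
set -euxo pipefail

# Values visible in the xtrace above; controllers 2 and 3 are no-ops here.
CONTROLLER1_SSH="ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100"
CONTROLLER2_SSH=":"
CONTROLLER3_SSH=":"

# Representative subset; the trace iterates the full list of tripleo_* units
# (aodh, ceilometer, octavia, designate, horizon, keystone, barbican, cinder,
# collectd, glance, gnocchi, manila, neutron, placement, nova, ...).
ServicesToStop=("tripleo_horizon.service"
                "tripleo_keystone.service"
                "tripleo_nova_api.service"
                "tripleo_nova_scheduler.service")

echo "Stopping systemd OpenStack services"
for service in ${ServicesToStop[*]}; do
    for i in {1..3}; do
        SSH_CMD=CONTROLLER${i}_SSH
        # ${!SSH_CMD} is bash indirect expansion: it yields the value of
        # CONTROLLER1_SSH, CONTROLLER2_SSH or CONTROLLER3_SSH. A ":" value
        # passes the non-empty test but turns the remote commands into no-ops.
        if [ ! -z "${!SSH_CMD}" ]; then
            echo "Stopping the $service in controller $i"
            # Only stop units that report "active"; inactive ones (e.g. the
            # designate services in this run) are probed but left alone.
            if ${!SSH_CMD} sudo systemctl is-active $service; then
                ${!SSH_CMD} sudo systemctl stop $service
            fi
        fi
    done
done

This structure explains the failure mode seen in the recap: the SSH connection to 192.168.122.100 was reset mid-run ("kex_exchange_identification: read: Connection reset by peer") while stopping tripleo_nova_scheduler.service on controller 1, so that ssh invocation returned nonzero and, under errexit, aborted the script, yielding failed=1 for the play.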