[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (edpm_node_ip). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (source_galera_members). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (source_mariadb_ip). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (edpm_node_hostname). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (enable_tlse). Using last defined value only.
[WARNING]: While constructing a mapping from /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/vars.yaml, line 6, column 1, found a duplicate dict key (prelaunch_barbican_secret). Using last defined value only.
Using /home/zuul/src/review.rdoproject.org/rdo-jobs/playbooks/data_plane_adoption/ansible.cfg as config file

PLAY [Prelude] *****************************************************************

TASK [prelude_local : undefined oc_login_command] ******************************
skipping: [localhost] => {"changed": false, "false_condition": "oc_login_command is not defined", "skip_reason": "Conditional result was False"}

TASK [prelude_local : test for oc CLI presence] ********************************
changed: [localhost] => {"ansible_facts": {"discovered_interpreter_python": "/usr/bin/python3"}, "changed": true, "cmd": "\ncommand -v oc\n", "delta": "0:00:00.008645", "end": "2025-10-06 14:57:37.036536", "failed_when_result": false, "msg": "", "rc": 0, "start": "2025-10-06 14:57:37.027891", "stderr": "", "stderr_lines": [], "stdout": "/home/zuul/bin/oc", "stdout_lines": ["/home/zuul/bin/oc"]}

TASK [prelude_local : oc CLI not found] ****************************************
skipping: [localhost] => {"changed": false, "false_condition": "oc_cli_present_result.rc != 0", "skip_reason": "Conditional result was False"}

TASK [prelude_local : test for install_yamls presence] *************************
ok: [localhost] => {"changed": false, "stat": {"atime": 1759758494.4337373, "attr_flags": "", "attributes": [], "block_size": 4096, "blocks": 280, "charset": "us-ascii", "checksum": "213a9da57f230d51896ada1b8856870c7ce6d3f6", "ctime": 1759758346.86403, "dev": 64513, "device_type": 0, "executable": false, "exists": true, "gid": 1000, "gr_name": "zuul", "inode": 134222697, "isblk": false, "ischr": false, "isdir": false, "isfifo": false, "isgid": false, "islnk": false, "isreg": true, "issock": false, "isuid": false, "mimetype": "text/plain", "mode": "0644", "mtime": 1759757671.1765263, "nlink": 1, "path": "/home/zuul/src/github.com/openstack-k8s-operators/install_yamls//Makefile", "pw_name": "zuul", "readable": true, "rgrp": true, "roth": true, "rusr": true, "size": 141484, "uid": 1000, "version": "3099408018", "wgrp": false, "woth": false, "writeable": true, "wusr": true, "xgrp": false, "xoth": false, "xusr": false}}

TASK [prelude_local : missing
install_yamls] *********************************** skipping: [localhost] => {"changed": false, "false_condition": "not install_yamls_makefile_stat.stat.exists", "skip_reason": "Conditional result was False"} TASK [prelude_local : clone install_yamls] ************************************* skipping: [localhost] => {"changed": false, "false_condition": "not install_yamls_makefile_stat.stat.exists", "skip_reason": "Conditional result was False"} TASK [prelude_local : perform oc login] **************************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc login -u kubeadmin -p 123456789\n", "delta": "0:00:00.310719", "end": "2025-10-06 14:57:38.114286", "msg": "", "rc": 0, "start": "2025-10-06 14:57:37.803567", "stderr": "+ oc login -u kubeadmin -p 123456789", "stderr_lines": ["+ oc login -u kubeadmin -p 123456789"], "stdout": "WARNING: Using insecure TLS client config. Setting this option is not supported!\n\nLogin successful.\n\nYou have access to 72 projects, the list has been suppressed. You can list all projects with 'oc projects'\n\nUsing project \"openstack\".", "stdout_lines": ["WARNING: Using insecure TLS client config. Setting this option is not supported!", "", "Login successful.", "", "You have access to 72 projects, the list has been suppressed. You can list all projects with 'oc projects'", "", "Using project \"openstack\"."]} TASK [prelude_local : use diffrent namespace for OSPdO adoption] *************** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src | bool | default(false)", "skip_reason": "Conditional result was False"} TASK [prelude_local : create namespace] **************************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\nNAMESPACE=openstack make namespace\n", "delta": "0:00:00.830227", "end": "2025-10-06 14:57:39.231164", "msg": "", "rc": 0, "start": "2025-10-06 14:57:38.400937", "stderr": "+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n+ NAMESPACE=openstack\n+ make namespace\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z openstack ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ cat", "stderr_lines": ["+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/", "+ NAMESPACE=openstack", "+ make namespace", "+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'", "+ '[' -z openstack ']'", "+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'", "+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ cat"], "stdout": "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml\nnamespace/openstack unchanged\ntimeout 500s bash -c \"while ! 
(oc get project.v1.project.openshift.io openstack); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\nopenstack Active\noc project openstack\nAlready on project \"openstack\" on server \"https://api.crc.testing:6443\".\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "stdout_lines": ["make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "bash scripts/gen-namespace.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml", "namespace/openstack unchanged", "timeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"", "NAME DISPLAY NAME STATUS", "openstack Active", "oc project openstack", "Already on project \"openstack\" on server \"https://api.crc.testing:6443\".", "make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'"]} TASK [prelude_local : set default namespace to openstack] ********************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc project openstack\n", "delta": "0:00:00.128184", "end": "2025-10-06 14:57:39.619119", "msg": "", "rc": 0, "start": "2025-10-06 14:57:39.490935", "stderr": "+ oc project openstack", "stderr_lines": ["+ oc project openstack"], "stdout": "Already on project \"openstack\" on server \"https://api.crc.testing:6443\".", "stdout_lines": ["Already on project \"openstack\" on server \"https://api.crc.testing:6443\"."]} PLAY [Cleanup] ***************************************************************** TASK [pcp_cleanup : clean up any remains of podified deployment] *************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n# Cleanup OpenStackControlPlane object\noc delete --ignore-not-found=true OpenStackControlPlane --all || true\n\n# Ensure that all pods in openstack namespace are deleted\nwhile oc get pod | grep -E 'rabbitmq-server-0|openstack-galera-0'; do\n sleep 2;\ndone\n\n# Cleanup OpenStackDataplane objects\noc delete --ignore-not-found=true OpenStackDataPlaneDeployment --all || true\noc delete --ignore-not-found=true OpenStackDataPlaneNodeSet --all || true\noc delete --ignore-not-found=true OpenStackDataPlaneService --all || true\n\n# Delete Adoption helper pods\noc delete --ignore-not-found=true --wait=false pod mariadb-copy-data\noc delete --ignore-not-found=true --wait=false pvc mariadb-data\noc delete --ignore-not-found=true --wait=false pod ovn-copy-data\n\n# Delete secrets\nfor secret in $(oc get secrets -o name); do\n echo \"Deleting secret ${secret}\";\n # (TODO: holser) The following 'oc patch' command removes finalizers from secrets to allow deletion.\n # This is a workaround for an issue where secrets may be stuck in a terminating state due to finalizers.\n # Once OSPRH-10262 is merged and the issue is resolved, this patch can be removed.\n oc patch ${secret} -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n oc delete ${secret} ;\ndone\n\n# Make pvs available if they are released\noc get pv -o json | jq -r '.items[] | select(.status.phase == \"Released\") | .metadata.name' | xargs -I{} oc patch pv {} --type='merge' -p '{\"spec\":{\"claimRef\": null}}'\n\n# Delete IT certificates\noc delete --ignore-not-found issuer rootca-internal\noc delete --ignore-not-found secret rootca-internal\n\noc delete subscription cluster-observability-operator -n openshift-operators --ignore-not-found\n", "delta": "0:00:15.967146", "end": "2025-10-06 14:57:55.846240", "msg": "", 
"rc": 0, "start": "2025-10-06 14:57:39.879094", "stderr": "+ oc delete --ignore-not-found=true OpenStackControlPlane --all\n+ oc get pod\n+ grep -E 'rabbitmq-server-0|openstack-galera-0'\nNo resources found in openstack namespace.\n+ oc delete --ignore-not-found=true OpenStackDataPlaneDeployment --all\n+ oc delete --ignore-not-found=true OpenStackDataPlaneNodeSet --all\n+ oc delete --ignore-not-found=true OpenStackDataPlaneService --all\n+ oc delete --ignore-not-found=true --wait=false pod mariadb-copy-data\n+ oc delete --ignore-not-found=true --wait=false pvc mariadb-data\n+ oc delete --ignore-not-found=true --wait=false pod ovn-copy-data\n++ oc get secrets -o name\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/builder-dockercfg-7xlj7'\n+ oc patch secret/builder-dockercfg-7xlj7 -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/builder-dockercfg-7xlj7\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-barbican-internal-svc'\n+ oc patch secret/cert-barbican-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-barbican-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-barbican-public-route'\n+ oc patch secret/cert-barbican-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-barbican-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-barbican-public-svc'\n+ oc patch secret/cert-barbican-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-barbican-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ceilometer-internal-svc'\n+ oc patch secret/cert-ceilometer-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ceilometer-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-cinder-internal-svc'\n+ oc patch secret/cert-cinder-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-cinder-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-cinder-public-route'\n+ oc patch secret/cert-cinder-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-cinder-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-cinder-public-svc'\n+ oc patch secret/cert-cinder-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-cinder-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-galera-openstack-cell1-svc'\n+ oc patch secret/cert-galera-openstack-cell1-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-galera-openstack-cell1-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-galera-openstack-svc'\n+ oc patch secret/cert-galera-openstack-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-galera-openstack-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-glance-default-internal-svc'\n+ oc patch secret/cert-glance-default-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-glance-default-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-glance-default-public-route'\n+ oc patch 
secret/cert-glance-default-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-glance-default-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-glance-default-public-svc'\n+ oc patch secret/cert-glance-default-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-glance-default-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-keystone-internal-svc'\n+ oc patch secret/cert-keystone-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-keystone-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-keystone-public-route'\n+ oc patch secret/cert-keystone-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-keystone-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-keystone-public-svc'\n+ oc patch secret/cert-keystone-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-keystone-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-kube-state-metrics-svc'\n+ oc patch secret/cert-kube-state-metrics-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-kube-state-metrics-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-memcached-svc'\n+ oc patch secret/cert-memcached-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-memcached-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-internal-svc'\n+ oc patch secret/cert-neutron-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-ovndbs'\n+ oc patch secret/cert-neutron-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-public-route'\n+ oc patch secret/cert-neutron-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-neutron-public-svc'\n+ oc patch secret/cert-neutron-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-neutron-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-internal-svc'\n+ oc patch secret/cert-nova-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-metadata-internal-svc'\n+ oc patch secret/cert-nova-metadata-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-metadata-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-public-route'\n+ oc patch secret/cert-nova-novncproxy-cell1-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-novncproxy-cell1-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-public-svc'\n+ oc patch secret/cert-nova-novncproxy-cell1-public-svc -p 
'{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-novncproxy-cell1-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-vencrypt'\n+ oc patch secret/cert-nova-novncproxy-cell1-vencrypt -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-novncproxy-cell1-vencrypt\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-public-route'\n+ oc patch secret/cert-nova-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-nova-public-svc'\n+ oc patch secret/cert-nova-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-nova-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovn-metrics'\n+ oc patch secret/cert-ovn-metrics -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovn-metrics\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovncontroller-ovndbs'\n+ oc patch secret/cert-ovncontroller-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovncontroller-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovndbcluster-nb-ovndbs'\n+ oc patch secret/cert-ovndbcluster-nb-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovndbcluster-nb-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovndbcluster-sb-ovndbs'\n+ oc patch secret/cert-ovndbcluster-sb-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovndbcluster-sb-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-ovnnorthd-ovndbs'\n+ oc patch secret/cert-ovnnorthd-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-ovnnorthd-ovndbs\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-placement-internal-svc'\n+ oc patch secret/cert-placement-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-placement-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-placement-public-route'\n+ oc patch secret/cert-placement-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-placement-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-placement-public-svc'\n+ oc patch secret/cert-placement-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-placement-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-rabbitmq-cell1-svc'\n+ oc patch secret/cert-rabbitmq-cell1-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-rabbitmq-cell1-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-rabbitmq-svc'\n+ oc patch secret/cert-rabbitmq-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-rabbitmq-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-swift-internal-svc'\n+ oc patch secret/cert-swift-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-swift-internal-svc\n+ for secret in $(oc get secrets -o name)\n+ 
echo 'Deleting secret secret/cert-swift-public-route'\n+ oc patch secret/cert-swift-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-swift-public-route\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/cert-swift-public-svc'\n+ oc patch secret/cert-swift-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/cert-swift-public-svc\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/combined-ca-bundle'\n+ oc patch secret/combined-ca-bundle -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/combined-ca-bundle\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/default-dockercfg-s55mr'\n+ oc patch secret/default-dockercfg-s55mr -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/default-dockercfg-s55mr\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/deployer-dockercfg-8rgl6'\n+ oc patch secret/deployer-dockercfg-8rgl6 -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/deployer-dockercfg-8rgl6\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/libvirt-secret'\n+ oc patch secret/libvirt-secret -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/libvirt-secret\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/octavia-ca-passphrase'\n+ oc patch secret/octavia-ca-passphrase -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/octavia-ca-passphrase\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/osp-secret'\n+ oc patch secret/osp-secret -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/osp-secret\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-internal'\n+ oc patch secret/rootca-internal -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-internal\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-libvirt'\n+ oc patch secret/rootca-libvirt -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-libvirt\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-ovn'\n+ oc patch secret/rootca-ovn -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-ovn\n+ for secret in $(oc get secrets -o name)\n+ echo 'Deleting secret secret/rootca-public'\n+ oc patch secret/rootca-public -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge\n+ oc delete secret/rootca-public\n+ oc get pv -o json\n+ jq -r '.items[] | select(.status.phase == \"Released\") | .metadata.name'\n+ xargs '-I{}' oc patch pv '{}' --type=merge -p '{\"spec\":{\"claimRef\": null}}'\n+ oc delete --ignore-not-found issuer rootca-internal\n+ oc delete --ignore-not-found secret rootca-internal\n+ oc delete subscription cluster-observability-operator -n openshift-operators --ignore-not-found", "stderr_lines": ["+ oc delete --ignore-not-found=true OpenStackControlPlane --all", "+ oc get pod", "+ grep -E 'rabbitmq-server-0|openstack-galera-0'", "No resources found in openstack namespace.", "+ oc delete --ignore-not-found=true OpenStackDataPlaneDeployment --all", "+ oc delete --ignore-not-found=true OpenStackDataPlaneNodeSet --all", "+ oc delete --ignore-not-found=true OpenStackDataPlaneService --all", "+ oc delete --ignore-not-found=true --wait=false pod mariadb-copy-data", "+ oc delete --ignore-not-found=true --wait=false 
pvc mariadb-data", "+ oc delete --ignore-not-found=true --wait=false pod ovn-copy-data", "++ oc get secrets -o name", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/builder-dockercfg-7xlj7'", "+ oc patch secret/builder-dockercfg-7xlj7 -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/builder-dockercfg-7xlj7", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-barbican-internal-svc'", "+ oc patch secret/cert-barbican-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-barbican-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-barbican-public-route'", "+ oc patch secret/cert-barbican-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-barbican-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-barbican-public-svc'", "+ oc patch secret/cert-barbican-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-barbican-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-ceilometer-internal-svc'", "+ oc patch secret/cert-ceilometer-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-ceilometer-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-cinder-internal-svc'", "+ oc patch secret/cert-cinder-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-cinder-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-cinder-public-route'", "+ oc patch secret/cert-cinder-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-cinder-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-cinder-public-svc'", "+ oc patch secret/cert-cinder-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-cinder-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-galera-openstack-cell1-svc'", "+ oc patch secret/cert-galera-openstack-cell1-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-galera-openstack-cell1-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-galera-openstack-svc'", "+ oc patch secret/cert-galera-openstack-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-galera-openstack-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-glance-default-internal-svc'", "+ oc patch secret/cert-glance-default-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-glance-default-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-glance-default-public-route'", "+ oc patch secret/cert-glance-default-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-glance-default-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-glance-default-public-svc'", "+ oc patch secret/cert-glance-default-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-glance-default-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting 
secret secret/cert-keystone-internal-svc'", "+ oc patch secret/cert-keystone-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-keystone-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-keystone-public-route'", "+ oc patch secret/cert-keystone-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-keystone-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-keystone-public-svc'", "+ oc patch secret/cert-keystone-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-keystone-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-kube-state-metrics-svc'", "+ oc patch secret/cert-kube-state-metrics-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-kube-state-metrics-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-memcached-svc'", "+ oc patch secret/cert-memcached-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-memcached-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-neutron-internal-svc'", "+ oc patch secret/cert-neutron-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-neutron-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-neutron-ovndbs'", "+ oc patch secret/cert-neutron-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-neutron-ovndbs", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-neutron-public-route'", "+ oc patch secret/cert-neutron-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-neutron-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-neutron-public-svc'", "+ oc patch secret/cert-neutron-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-neutron-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-internal-svc'", "+ oc patch secret/cert-nova-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-nova-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-metadata-internal-svc'", "+ oc patch secret/cert-nova-metadata-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-nova-metadata-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-public-route'", "+ oc patch secret/cert-nova-novncproxy-cell1-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-nova-novncproxy-cell1-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-public-svc'", "+ oc patch secret/cert-nova-novncproxy-cell1-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-nova-novncproxy-cell1-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-novncproxy-cell1-vencrypt'", "+ oc patch secret/cert-nova-novncproxy-cell1-vencrypt -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete 
secret/cert-nova-novncproxy-cell1-vencrypt", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-public-route'", "+ oc patch secret/cert-nova-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-nova-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-nova-public-svc'", "+ oc patch secret/cert-nova-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-nova-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-ovn-metrics'", "+ oc patch secret/cert-ovn-metrics -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-ovn-metrics", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-ovncontroller-ovndbs'", "+ oc patch secret/cert-ovncontroller-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-ovncontroller-ovndbs", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-ovndbcluster-nb-ovndbs'", "+ oc patch secret/cert-ovndbcluster-nb-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-ovndbcluster-nb-ovndbs", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-ovndbcluster-sb-ovndbs'", "+ oc patch secret/cert-ovndbcluster-sb-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-ovndbcluster-sb-ovndbs", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-ovnnorthd-ovndbs'", "+ oc patch secret/cert-ovnnorthd-ovndbs -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-ovnnorthd-ovndbs", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-placement-internal-svc'", "+ oc patch secret/cert-placement-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-placement-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-placement-public-route'", "+ oc patch secret/cert-placement-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-placement-public-route", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-placement-public-svc'", "+ oc patch secret/cert-placement-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-placement-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-rabbitmq-cell1-svc'", "+ oc patch secret/cert-rabbitmq-cell1-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-rabbitmq-cell1-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-rabbitmq-svc'", "+ oc patch secret/cert-rabbitmq-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-rabbitmq-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-swift-internal-svc'", "+ oc patch secret/cert-swift-internal-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-swift-internal-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/cert-swift-public-route'", "+ oc patch secret/cert-swift-public-route -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-swift-public-route", "+ for secret in $(oc get secrets 
-o name)", "+ echo 'Deleting secret secret/cert-swift-public-svc'", "+ oc patch secret/cert-swift-public-svc -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/cert-swift-public-svc", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/combined-ca-bundle'", "+ oc patch secret/combined-ca-bundle -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/combined-ca-bundle", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/default-dockercfg-s55mr'", "+ oc patch secret/default-dockercfg-s55mr -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/default-dockercfg-s55mr", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/deployer-dockercfg-8rgl6'", "+ oc patch secret/deployer-dockercfg-8rgl6 -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/deployer-dockercfg-8rgl6", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/libvirt-secret'", "+ oc patch secret/libvirt-secret -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/libvirt-secret", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/octavia-ca-passphrase'", "+ oc patch secret/octavia-ca-passphrase -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/octavia-ca-passphrase", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/osp-secret'", "+ oc patch secret/osp-secret -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/osp-secret", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/rootca-internal'", "+ oc patch secret/rootca-internal -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/rootca-internal", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/rootca-libvirt'", "+ oc patch secret/rootca-libvirt -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/rootca-libvirt", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/rootca-ovn'", "+ oc patch secret/rootca-ovn -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/rootca-ovn", "+ for secret in $(oc get secrets -o name)", "+ echo 'Deleting secret secret/rootca-public'", "+ oc patch secret/rootca-public -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge", "+ oc delete secret/rootca-public", "+ oc get pv -o json", "+ jq -r '.items[] | select(.status.phase == \"Released\") | .metadata.name'", "+ xargs '-I{}' oc patch pv '{}' --type=merge -p '{\"spec\":{\"claimRef\": null}}'", "+ oc delete --ignore-not-found issuer rootca-internal", "+ oc delete --ignore-not-found secret rootca-internal", "+ oc delete subscription cluster-observability-operator -n openshift-operators --ignore-not-found"], "stdout": "No resources found\nNo resources found\nNo resources found\nNo resources found\nDeleting secret secret/builder-dockercfg-7xlj7\nsecret/builder-dockercfg-7xlj7 patched (no change)\nsecret \"builder-dockercfg-7xlj7\" deleted\nDeleting secret secret/cert-barbican-internal-svc\nsecret/cert-barbican-internal-svc patched (no change)\nsecret \"cert-barbican-internal-svc\" deleted\nDeleting secret secret/cert-barbican-public-route\nsecret/cert-barbican-public-route patched (no change)\nsecret \"cert-barbican-public-route\" deleted\nDeleting secret secret/cert-barbican-public-svc\nsecret/cert-barbican-public-svc patched (no change)\nsecret 
\"cert-barbican-public-svc\" deleted\nDeleting secret secret/cert-ceilometer-internal-svc\nsecret/cert-ceilometer-internal-svc patched (no change)\nsecret \"cert-ceilometer-internal-svc\" deleted\nDeleting secret secret/cert-cinder-internal-svc\nsecret/cert-cinder-internal-svc patched (no change)\nsecret \"cert-cinder-internal-svc\" deleted\nDeleting secret secret/cert-cinder-public-route\nsecret/cert-cinder-public-route patched (no change)\nsecret \"cert-cinder-public-route\" deleted\nDeleting secret secret/cert-cinder-public-svc\nsecret/cert-cinder-public-svc patched (no change)\nsecret \"cert-cinder-public-svc\" deleted\nDeleting secret secret/cert-galera-openstack-cell1-svc\nsecret/cert-galera-openstack-cell1-svc patched (no change)\nsecret \"cert-galera-openstack-cell1-svc\" deleted\nDeleting secret secret/cert-galera-openstack-svc\nsecret/cert-galera-openstack-svc patched (no change)\nsecret \"cert-galera-openstack-svc\" deleted\nDeleting secret secret/cert-glance-default-internal-svc\nsecret/cert-glance-default-internal-svc patched (no change)\nsecret \"cert-glance-default-internal-svc\" deleted\nDeleting secret secret/cert-glance-default-public-route\nsecret/cert-glance-default-public-route patched (no change)\nsecret \"cert-glance-default-public-route\" deleted\nDeleting secret secret/cert-glance-default-public-svc\nsecret/cert-glance-default-public-svc patched (no change)\nsecret \"cert-glance-default-public-svc\" deleted\nDeleting secret secret/cert-keystone-internal-svc\nsecret/cert-keystone-internal-svc patched (no change)\nsecret \"cert-keystone-internal-svc\" deleted\nDeleting secret secret/cert-keystone-public-route\nsecret/cert-keystone-public-route patched (no change)\nsecret \"cert-keystone-public-route\" deleted\nDeleting secret secret/cert-keystone-public-svc\nsecret/cert-keystone-public-svc patched (no change)\nsecret \"cert-keystone-public-svc\" deleted\nDeleting secret secret/cert-kube-state-metrics-svc\nsecret/cert-kube-state-metrics-svc patched (no change)\nsecret \"cert-kube-state-metrics-svc\" deleted\nDeleting secret secret/cert-memcached-svc\nsecret/cert-memcached-svc patched (no change)\nsecret \"cert-memcached-svc\" deleted\nDeleting secret secret/cert-neutron-internal-svc\nsecret/cert-neutron-internal-svc patched (no change)\nsecret \"cert-neutron-internal-svc\" deleted\nDeleting secret secret/cert-neutron-ovndbs\nsecret/cert-neutron-ovndbs patched (no change)\nsecret \"cert-neutron-ovndbs\" deleted\nDeleting secret secret/cert-neutron-public-route\nsecret/cert-neutron-public-route patched (no change)\nsecret \"cert-neutron-public-route\" deleted\nDeleting secret secret/cert-neutron-public-svc\nsecret/cert-neutron-public-svc patched (no change)\nsecret \"cert-neutron-public-svc\" deleted\nDeleting secret secret/cert-nova-internal-svc\nsecret/cert-nova-internal-svc patched (no change)\nsecret \"cert-nova-internal-svc\" deleted\nDeleting secret secret/cert-nova-metadata-internal-svc\nsecret/cert-nova-metadata-internal-svc patched (no change)\nsecret \"cert-nova-metadata-internal-svc\" deleted\nDeleting secret secret/cert-nova-novncproxy-cell1-public-route\nsecret/cert-nova-novncproxy-cell1-public-route patched (no change)\nsecret \"cert-nova-novncproxy-cell1-public-route\" deleted\nDeleting secret secret/cert-nova-novncproxy-cell1-public-svc\nsecret/cert-nova-novncproxy-cell1-public-svc patched (no change)\nsecret \"cert-nova-novncproxy-cell1-public-svc\" deleted\nDeleting secret 
secret/cert-nova-novncproxy-cell1-vencrypt\nsecret/cert-nova-novncproxy-cell1-vencrypt patched (no change)\nsecret \"cert-nova-novncproxy-cell1-vencrypt\" deleted\nDeleting secret secret/cert-nova-public-route\nsecret/cert-nova-public-route patched (no change)\nsecret \"cert-nova-public-route\" deleted\nDeleting secret secret/cert-nova-public-svc\nsecret/cert-nova-public-svc patched (no change)\nsecret \"cert-nova-public-svc\" deleted\nDeleting secret secret/cert-ovn-metrics\nsecret/cert-ovn-metrics patched (no change)\nsecret \"cert-ovn-metrics\" deleted\nDeleting secret secret/cert-ovncontroller-ovndbs\nsecret/cert-ovncontroller-ovndbs patched (no change)\nsecret \"cert-ovncontroller-ovndbs\" deleted\nDeleting secret secret/cert-ovndbcluster-nb-ovndbs\nsecret/cert-ovndbcluster-nb-ovndbs patched (no change)\nsecret \"cert-ovndbcluster-nb-ovndbs\" deleted\nDeleting secret secret/cert-ovndbcluster-sb-ovndbs\nsecret/cert-ovndbcluster-sb-ovndbs patched (no change)\nsecret \"cert-ovndbcluster-sb-ovndbs\" deleted\nDeleting secret secret/cert-ovnnorthd-ovndbs\nsecret/cert-ovnnorthd-ovndbs patched (no change)\nsecret \"cert-ovnnorthd-ovndbs\" deleted\nDeleting secret secret/cert-placement-internal-svc\nsecret/cert-placement-internal-svc patched (no change)\nsecret \"cert-placement-internal-svc\" deleted\nDeleting secret secret/cert-placement-public-route\nsecret/cert-placement-public-route patched (no change)\nsecret \"cert-placement-public-route\" deleted\nDeleting secret secret/cert-placement-public-svc\nsecret/cert-placement-public-svc patched (no change)\nsecret \"cert-placement-public-svc\" deleted\nDeleting secret secret/cert-rabbitmq-cell1-svc\nsecret/cert-rabbitmq-cell1-svc patched (no change)\nsecret \"cert-rabbitmq-cell1-svc\" deleted\nDeleting secret secret/cert-rabbitmq-svc\nsecret/cert-rabbitmq-svc patched (no change)\nsecret \"cert-rabbitmq-svc\" deleted\nDeleting secret secret/cert-swift-internal-svc\nsecret/cert-swift-internal-svc patched (no change)\nsecret \"cert-swift-internal-svc\" deleted\nDeleting secret secret/cert-swift-public-route\nsecret/cert-swift-public-route patched (no change)\nsecret \"cert-swift-public-route\" deleted\nDeleting secret secret/cert-swift-public-svc\nsecret/cert-swift-public-svc patched (no change)\nsecret \"cert-swift-public-svc\" deleted\nDeleting secret secret/combined-ca-bundle\nsecret/combined-ca-bundle patched (no change)\nsecret \"combined-ca-bundle\" deleted\nDeleting secret secret/default-dockercfg-s55mr\nsecret/default-dockercfg-s55mr patched (no change)\nsecret \"default-dockercfg-s55mr\" deleted\nDeleting secret secret/deployer-dockercfg-8rgl6\nsecret/deployer-dockercfg-8rgl6 patched (no change)\nsecret \"deployer-dockercfg-8rgl6\" deleted\nDeleting secret secret/libvirt-secret\nsecret/libvirt-secret patched (no change)\nsecret \"libvirt-secret\" deleted\nDeleting secret secret/octavia-ca-passphrase\nsecret/octavia-ca-passphrase patched (no change)\nsecret \"octavia-ca-passphrase\" deleted\nDeleting secret secret/osp-secret\nsecret/osp-secret patched (no change)\nsecret \"osp-secret\" deleted\nDeleting secret secret/rootca-internal\nsecret/rootca-internal patched (no change)\nsecret \"rootca-internal\" deleted\nDeleting secret secret/rootca-libvirt\nsecret/rootca-libvirt patched (no change)\nsecret \"rootca-libvirt\" deleted\nDeleting secret secret/rootca-ovn\nsecret/rootca-ovn patched (no change)\nsecret \"rootca-ovn\" deleted\nDeleting secret secret/rootca-public\nsecret/rootca-public patched (no change)\nsecret \"rootca-public\" 
deleted\npersistentvolume/local-storage06-crc patched\npersistentvolume/local-storage07-crc patched\npersistentvolume/local-storage08-crc patched\npersistentvolume/local-storage12-crc patched", "stdout_lines": ["No resources found", "No resources found", "No resources found", "No resources found", "Deleting secret secret/builder-dockercfg-7xlj7", "secret/builder-dockercfg-7xlj7 patched (no change)", "secret \"builder-dockercfg-7xlj7\" deleted", "Deleting secret secret/cert-barbican-internal-svc", "secret/cert-barbican-internal-svc patched (no change)", "secret \"cert-barbican-internal-svc\" deleted", "Deleting secret secret/cert-barbican-public-route", "secret/cert-barbican-public-route patched (no change)", "secret \"cert-barbican-public-route\" deleted", "Deleting secret secret/cert-barbican-public-svc", "secret/cert-barbican-public-svc patched (no change)", "secret \"cert-barbican-public-svc\" deleted", "Deleting secret secret/cert-ceilometer-internal-svc", "secret/cert-ceilometer-internal-svc patched (no change)", "secret \"cert-ceilometer-internal-svc\" deleted", "Deleting secret secret/cert-cinder-internal-svc", "secret/cert-cinder-internal-svc patched (no change)", "secret \"cert-cinder-internal-svc\" deleted", "Deleting secret secret/cert-cinder-public-route", "secret/cert-cinder-public-route patched (no change)", "secret \"cert-cinder-public-route\" deleted", "Deleting secret secret/cert-cinder-public-svc", "secret/cert-cinder-public-svc patched (no change)", "secret \"cert-cinder-public-svc\" deleted", "Deleting secret secret/cert-galera-openstack-cell1-svc", "secret/cert-galera-openstack-cell1-svc patched (no change)", "secret \"cert-galera-openstack-cell1-svc\" deleted", "Deleting secret secret/cert-galera-openstack-svc", "secret/cert-galera-openstack-svc patched (no change)", "secret \"cert-galera-openstack-svc\" deleted", "Deleting secret secret/cert-glance-default-internal-svc", "secret/cert-glance-default-internal-svc patched (no change)", "secret \"cert-glance-default-internal-svc\" deleted", "Deleting secret secret/cert-glance-default-public-route", "secret/cert-glance-default-public-route patched (no change)", "secret \"cert-glance-default-public-route\" deleted", "Deleting secret secret/cert-glance-default-public-svc", "secret/cert-glance-default-public-svc patched (no change)", "secret \"cert-glance-default-public-svc\" deleted", "Deleting secret secret/cert-keystone-internal-svc", "secret/cert-keystone-internal-svc patched (no change)", "secret \"cert-keystone-internal-svc\" deleted", "Deleting secret secret/cert-keystone-public-route", "secret/cert-keystone-public-route patched (no change)", "secret \"cert-keystone-public-route\" deleted", "Deleting secret secret/cert-keystone-public-svc", "secret/cert-keystone-public-svc patched (no change)", "secret \"cert-keystone-public-svc\" deleted", "Deleting secret secret/cert-kube-state-metrics-svc", "secret/cert-kube-state-metrics-svc patched (no change)", "secret \"cert-kube-state-metrics-svc\" deleted", "Deleting secret secret/cert-memcached-svc", "secret/cert-memcached-svc patched (no change)", "secret \"cert-memcached-svc\" deleted", "Deleting secret secret/cert-neutron-internal-svc", "secret/cert-neutron-internal-svc patched (no change)", "secret \"cert-neutron-internal-svc\" deleted", "Deleting secret secret/cert-neutron-ovndbs", "secret/cert-neutron-ovndbs patched (no change)", "secret \"cert-neutron-ovndbs\" deleted", "Deleting secret secret/cert-neutron-public-route", "secret/cert-neutron-public-route patched (no 
change)", "secret \"cert-neutron-public-route\" deleted", "Deleting secret secret/cert-neutron-public-svc", "secret/cert-neutron-public-svc patched (no change)", "secret \"cert-neutron-public-svc\" deleted", "Deleting secret secret/cert-nova-internal-svc", "secret/cert-nova-internal-svc patched (no change)", "secret \"cert-nova-internal-svc\" deleted", "Deleting secret secret/cert-nova-metadata-internal-svc", "secret/cert-nova-metadata-internal-svc patched (no change)", "secret \"cert-nova-metadata-internal-svc\" deleted", "Deleting secret secret/cert-nova-novncproxy-cell1-public-route", "secret/cert-nova-novncproxy-cell1-public-route patched (no change)", "secret \"cert-nova-novncproxy-cell1-public-route\" deleted", "Deleting secret secret/cert-nova-novncproxy-cell1-public-svc", "secret/cert-nova-novncproxy-cell1-public-svc patched (no change)", "secret \"cert-nova-novncproxy-cell1-public-svc\" deleted", "Deleting secret secret/cert-nova-novncproxy-cell1-vencrypt", "secret/cert-nova-novncproxy-cell1-vencrypt patched (no change)", "secret \"cert-nova-novncproxy-cell1-vencrypt\" deleted", "Deleting secret secret/cert-nova-public-route", "secret/cert-nova-public-route patched (no change)", "secret \"cert-nova-public-route\" deleted", "Deleting secret secret/cert-nova-public-svc", "secret/cert-nova-public-svc patched (no change)", "secret \"cert-nova-public-svc\" deleted", "Deleting secret secret/cert-ovn-metrics", "secret/cert-ovn-metrics patched (no change)", "secret \"cert-ovn-metrics\" deleted", "Deleting secret secret/cert-ovncontroller-ovndbs", "secret/cert-ovncontroller-ovndbs patched (no change)", "secret \"cert-ovncontroller-ovndbs\" deleted", "Deleting secret secret/cert-ovndbcluster-nb-ovndbs", "secret/cert-ovndbcluster-nb-ovndbs patched (no change)", "secret \"cert-ovndbcluster-nb-ovndbs\" deleted", "Deleting secret secret/cert-ovndbcluster-sb-ovndbs", "secret/cert-ovndbcluster-sb-ovndbs patched (no change)", "secret \"cert-ovndbcluster-sb-ovndbs\" deleted", "Deleting secret secret/cert-ovnnorthd-ovndbs", "secret/cert-ovnnorthd-ovndbs patched (no change)", "secret \"cert-ovnnorthd-ovndbs\" deleted", "Deleting secret secret/cert-placement-internal-svc", "secret/cert-placement-internal-svc patched (no change)", "secret \"cert-placement-internal-svc\" deleted", "Deleting secret secret/cert-placement-public-route", "secret/cert-placement-public-route patched (no change)", "secret \"cert-placement-public-route\" deleted", "Deleting secret secret/cert-placement-public-svc", "secret/cert-placement-public-svc patched (no change)", "secret \"cert-placement-public-svc\" deleted", "Deleting secret secret/cert-rabbitmq-cell1-svc", "secret/cert-rabbitmq-cell1-svc patched (no change)", "secret \"cert-rabbitmq-cell1-svc\" deleted", "Deleting secret secret/cert-rabbitmq-svc", "secret/cert-rabbitmq-svc patched (no change)", "secret \"cert-rabbitmq-svc\" deleted", "Deleting secret secret/cert-swift-internal-svc", "secret/cert-swift-internal-svc patched (no change)", "secret \"cert-swift-internal-svc\" deleted", "Deleting secret secret/cert-swift-public-route", "secret/cert-swift-public-route patched (no change)", "secret \"cert-swift-public-route\" deleted", "Deleting secret secret/cert-swift-public-svc", "secret/cert-swift-public-svc patched (no change)", "secret \"cert-swift-public-svc\" deleted", "Deleting secret secret/combined-ca-bundle", "secret/combined-ca-bundle patched (no change)", "secret \"combined-ca-bundle\" deleted", "Deleting secret secret/default-dockercfg-s55mr", 
"secret/default-dockercfg-s55mr patched (no change)", "secret \"default-dockercfg-s55mr\" deleted", "Deleting secret secret/deployer-dockercfg-8rgl6", "secret/deployer-dockercfg-8rgl6 patched (no change)", "secret \"deployer-dockercfg-8rgl6\" deleted", "Deleting secret secret/libvirt-secret", "secret/libvirt-secret patched (no change)", "secret \"libvirt-secret\" deleted", "Deleting secret secret/octavia-ca-passphrase", "secret/octavia-ca-passphrase patched (no change)", "secret \"octavia-ca-passphrase\" deleted", "Deleting secret secret/osp-secret", "secret/osp-secret patched (no change)", "secret \"osp-secret\" deleted", "Deleting secret secret/rootca-internal", "secret/rootca-internal patched (no change)", "secret \"rootca-internal\" deleted", "Deleting secret secret/rootca-libvirt", "secret/rootca-libvirt patched (no change)", "secret \"rootca-libvirt\" deleted", "Deleting secret secret/rootca-ovn", "secret/rootca-ovn patched (no change)", "secret \"rootca-ovn\" deleted", "Deleting secret secret/rootca-public", "secret/rootca-public patched (no change)", "secret \"rootca-public\" deleted", "persistentvolume/local-storage06-crc patched", "persistentvolume/local-storage07-crc patched", "persistentvolume/local-storage08-crc patched", "persistentvolume/local-storage12-crc patched"]} TASK [pcp_cleanup : revert standalone VM to snapshotted state] ***************** skipping: [localhost] => {"changed": false, "false_condition": "standalone_revert_enabled|bool", "skip_reason": "Conditional result was False"} TASK [pcp_cleanup : reset CRC storage] ***************************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n# Try up to 3 times to clean up and recreate CRC storage, with a 5-second delay between attempts\nfor i in {1..3}; do\n make crc_storage_cleanup crc_storage && break || sleep 5\ndone\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\nNAMESPACE=openstack make namespace\nfor CELL in $(echo $RENAMED_CELLS); do\n oc delete pvc mysql-db-openstack-$CELL-galera-0 --ignore-not-found=true\n oc delete pvc persistence-rabbitmq-$CELL-server-0 --ignore-not-found=true\ndone\n", "delta": "0:00:18.212945", "end": "2025-10-06 14:58:14.369618", "msg": "", "rc": 0, "start": "2025-10-06 14:57:56.156673", "stderr": "+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n+ for i in {1..3}\n+ make crc_storage_cleanup crc_storage\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z crc-storage ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage\n+ '[' '!' 
-d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage\n+ cat\n++ oc get pv --selector provisioned-by=crc-devsetup --no-headers\n++ grep Bound\n++ awk '{print $6}'\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/swift-swift-storage-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/swift-swift-storage-0\n++ cut -d / -f 2\n+ NAME=swift-swift-storage-0\n+ oc delete -n openstack pvc/swift-swift-storage-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/glance-glance-default-internal-api-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/glance-glance-default-internal-api-0\n++ cut -d / -f 2\n+ NAME=glance-glance-default-internal-api-0\n+ oc delete -n openstack pvc/glance-glance-default-internal-api-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/mysql-db-openstack-cell1-galera-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/mysql-db-openstack-cell1-galera-0\n++ cut -d / -f 2\n+ NAME=mysql-db-openstack-cell1-galera-0\n+ oc delete -n openstack pvc/mysql-db-openstack-cell1-galera-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/glance-glance-default-external-api-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/glance-glance-default-external-api-0\n++ cut -d / -f 2\n+ NAME=glance-glance-default-external-api-0\n+ oc delete -n openstack pvc/glance-glance-default-external-api-0 --ignore-not-found\n+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`\n++ echo openstack/mysql-db-openstack-galera-0\n++ cut -d / -f 1\n+ NS=openstack\n++ echo openstack/mysql-db-openstack-galera-0\n++ cut -d / -f 2\n+ NAME=mysql-db-openstack-galera-0\n+ oc delete -n openstack pvc/mysql-db-openstack-galera-0 --ignore-not-found\n++ oc get pv --selector provisioned-by=crc-devsetup --no-headers\n++ awk '{print $1}'\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage01-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage02-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage03-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage04-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage05-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage06-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage07-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage08-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage09-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup 
--no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage10-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage11-crc\n+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`\n+ oc delete pv/local-storage12-crc\n+++ dirname scripts/delete-pv.sh\n++ cd scripts\n++ pwd -P\n+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts\n+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_common.sh\n++ set -ex\n++ OPERATION=create\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n+ PV_NUM=12\n+ TIMEOUT=500s\n++ oc get node -o template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}' -l node-role.kubernetes.io/worker\n+ NODE_NAMES=crc\n+ '[' -z crc ']'\n+ for node in $NODE_NAMES\n+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_apply.sh crc delete\n++ set -ex\n++ NODE=crc\n++ OPERATION=delete\n++ oc delete -n crc-storage job crc-storage-crc --ignore-not-found\n++ cat\n++ oc apply -f -\nWarning: would violate PodSecurity \"restricted:latest\": privileged (container \"storage\" must not set securityContext.privileged=true), allowPrivilegeEscalation != false (container \"storage\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"storage\" must set securityContext.capabilities.drop=[\"ALL\"]), restricted volume types (volume \"node-mnt\" uses restricted volume type \"hostPath\"), runAsNonRoot != true (pod or container \"storage\" must set securityContext.runAsNonRoot=true), runAsUser=0 (pod and container \"storage\" must not set runAsUser=0)\n+ oc wait job -n crc-storage -l install-yamls.crc.storage --for condition=Complete --timeout 500s\n+++ dirname scripts/create-pv.sh\n++ cd scripts\n++ pwd -P\n+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts\n+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_common.sh\n++ set -ex\n++ OPERATION=create\n++ cat\n++ oc apply -f -\n++ cat\n++ oc apply -f -\n++ oc apply -f -\n++ cat\n++ cat\n++ oc apply -f -\n+ PV_NUM=12\n+ TIMEOUT=500s\n++ oc get pv -o json\n++ jq -r '.items[] | select(.status.phase | test(\"Released\")).metadata.name'\n+ released=\n++ oc get node -o template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}' -l node-role.kubernetes.io/worker\n+ NODE_NAMES=crc\n+ '[' -z crc ']'\n+ for node in $NODE_NAMES\n+ . 
/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_apply.sh crc create\n++ set -ex\n++ NODE=crc\n++ OPERATION=create\n++ oc delete -n crc-storage job crc-storage-crc --ignore-not-found\n++ cat\n++ oc apply -f -\nWarning: would violate PodSecurity \"restricted:latest\": privileged (container \"storage\" must not set securityContext.privileged=true), allowPrivilegeEscalation != false (container \"storage\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"storage\" must set securityContext.capabilities.drop=[\"ALL\"]), restricted volume types (volume \"node-mnt\" uses restricted volume type \"hostPath\"), runAsNonRoot != true (pod or container \"storage\" must set securityContext.runAsNonRoot=true), runAsUser=0 (pod and container \"storage\" must not set runAsUser=0)\n+ oc wait job -n crc-storage -l install-yamls.crc.storage --for condition=Complete --timeout 500s\n+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out\n+ '[' -z '\"local-storage\"' ']'\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc\n+ PV_NUM=12\n+ STORAGE_CAPACITY=10\n++ oc get node -o name -l node-role.kubernetes.io/worker\n++ sed -e 's|node/||'\n++ head -c-1\n++ tr '\\n' ' '\n+ NODE_NAMES=crc\n+ '[' -z crc ']'\n+ cat\n+ for node in $NODE_NAMES\n++ seq -w 12\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ for i in `seq -w $PV_NUM`\n+ cat\n++ sed -e 's/^\"//' -e 's/\"$//'\n+ cat\n+ break\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ NAMESPACE=openstack\n+ make namespace\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z openstack ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'\n+ cat\n++ echo cell1\n+ for CELL in $(echo $RENAMED_CELLS)\n+ oc delete pvc mysql-db-openstack-cell1-galera-0 --ignore-not-found=true\n+ oc delete pvc persistence-rabbitmq-cell1-server-0 --ignore-not-found=true", "stderr_lines": ["+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/", "+ for i in {1..3}", "+ make crc_storage_cleanup crc_storage", "+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'", "+ '[' -z crc-storage ']'", "+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage", "+ '[' '!' 
-d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage ']'", "+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage", "+ cat", "++ oc get pv --selector provisioned-by=crc-devsetup --no-headers", "++ grep Bound", "++ awk '{print $6}'", "+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`", "++ echo openstack/swift-swift-storage-0", "++ cut -d / -f 1", "+ NS=openstack", "++ echo openstack/swift-swift-storage-0", "++ cut -d / -f 2", "+ NAME=swift-swift-storage-0", "+ oc delete -n openstack pvc/swift-swift-storage-0 --ignore-not-found", "+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`", "++ echo openstack/glance-glance-default-internal-api-0", "++ cut -d / -f 1", "+ NS=openstack", "++ echo openstack/glance-glance-default-internal-api-0", "++ cut -d / -f 2", "+ NAME=glance-glance-default-internal-api-0", "+ oc delete -n openstack pvc/glance-glance-default-internal-api-0 --ignore-not-found", "+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`", "++ echo openstack/mysql-db-openstack-cell1-galera-0", "++ cut -d / -f 1", "+ NS=openstack", "++ echo openstack/mysql-db-openstack-cell1-galera-0", "++ cut -d / -f 2", "+ NAME=mysql-db-openstack-cell1-galera-0", "+ oc delete -n openstack pvc/mysql-db-openstack-cell1-galera-0 --ignore-not-found", "+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`", "++ echo openstack/glance-glance-default-external-api-0", "++ cut -d / -f 1", "+ NS=openstack", "++ echo openstack/glance-glance-default-external-api-0", "++ cut -d / -f 2", "+ NAME=glance-glance-default-external-api-0", "+ oc delete -n openstack pvc/glance-glance-default-external-api-0 --ignore-not-found", "+ for pvc in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | grep Bound | awk '{print $6}'`", "++ echo openstack/mysql-db-openstack-galera-0", "++ cut -d / -f 1", "+ NS=openstack", "++ echo openstack/mysql-db-openstack-galera-0", "++ cut -d / -f 2", "+ NAME=mysql-db-openstack-galera-0", "+ oc delete -n openstack pvc/mysql-db-openstack-galera-0 --ignore-not-found", "++ oc get pv --selector provisioned-by=crc-devsetup --no-headers", "++ awk '{print $1}'", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage01-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage02-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage03-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage04-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage05-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage06-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage07-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage08-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | 
awk '{print $1}'`", "+ oc delete pv/local-storage09-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage10-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage11-crc", "+ for pv in `oc get pv --selector provisioned-by=crc-devsetup --no-headers | awk '{print $1}'`", "+ oc delete pv/local-storage12-crc", "+++ dirname scripts/delete-pv.sh", "++ cd scripts", "++ pwd -P", "+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts", "+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_common.sh", "++ set -ex", "++ OPERATION=create", "++ cat", "++ oc apply -f -", "++ cat", "++ oc apply -f -", "++ cat", "++ oc apply -f -", "++ cat", "++ oc apply -f -", "+ PV_NUM=12", "+ TIMEOUT=500s", "++ oc get node -o template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}' -l node-role.kubernetes.io/worker", "+ NODE_NAMES=crc", "+ '[' -z crc ']'", "+ for node in $NODE_NAMES", "+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_apply.sh crc delete", "++ set -ex", "++ NODE=crc", "++ OPERATION=delete", "++ oc delete -n crc-storage job crc-storage-crc --ignore-not-found", "++ cat", "++ oc apply -f -", "Warning: would violate PodSecurity \"restricted:latest\": privileged (container \"storage\" must not set securityContext.privileged=true), allowPrivilegeEscalation != false (container \"storage\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"storage\" must set securityContext.capabilities.drop=[\"ALL\"]), restricted volume types (volume \"node-mnt\" uses restricted volume type \"hostPath\"), runAsNonRoot != true (pod or container \"storage\" must set securityContext.runAsNonRoot=true), runAsUser=0 (pod and container \"storage\" must not set runAsUser=0)", "+ oc wait job -n crc-storage -l install-yamls.crc.storage --for condition=Complete --timeout 500s", "+++ dirname scripts/create-pv.sh", "++ cd scripts", "++ pwd -P", "+ SCRIPTPATH=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts", "+ . /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_common.sh", "++ set -ex", "++ OPERATION=create", "++ cat", "++ oc apply -f -", "++ cat", "++ oc apply -f -", "++ oc apply -f -", "++ cat", "++ cat", "++ oc apply -f -", "+ PV_NUM=12", "+ TIMEOUT=500s", "++ oc get pv -o json", "++ jq -r '.items[] | select(.status.phase | test(\"Released\")).metadata.name'", "+ released=", "++ oc get node -o template --template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}' -l node-role.kubernetes.io/worker", "+ NODE_NAMES=crc", "+ '[' -z crc ']'", "+ for node in $NODE_NAMES", "+ . 
/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/scripts/storage_apply.sh crc create", "++ set -ex", "++ NODE=crc", "++ OPERATION=create", "++ oc delete -n crc-storage job crc-storage-crc --ignore-not-found", "++ cat", "++ oc apply -f -", "Warning: would violate PodSecurity \"restricted:latest\": privileged (container \"storage\" must not set securityContext.privileged=true), allowPrivilegeEscalation != false (container \"storage\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"storage\" must set securityContext.capabilities.drop=[\"ALL\"]), restricted volume types (volume \"node-mnt\" uses restricted volume type \"hostPath\"), runAsNonRoot != true (pod or container \"storage\" must set securityContext.runAsNonRoot=true), runAsUser=0 (pod and container \"storage\" must not set runAsUser=0)", "+ oc wait job -n crc-storage -l install-yamls.crc.storage --for condition=Complete --timeout 500s", "+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out", "+ '[' -z '\"local-storage\"' ']'", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc ']'", "+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc", "+ PV_NUM=12", "+ STORAGE_CAPACITY=10", "++ oc get node -o name -l node-role.kubernetes.io/worker", "++ sed -e 's|node/||'", "++ head -c-1", "++ tr '\\n' ' '", "+ NODE_NAMES=crc", "+ '[' -z crc ']'", "+ cat", "+ for node in $NODE_NAMES", "++ seq -w 12", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ for i in `seq -w $PV_NUM`", "+ cat", "++ sed -e 's/^\"//' -e 's/\"$//'", "+ cat", "+ break", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ NAMESPACE=openstack", "+ make namespace", "+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'", "+ '[' -z openstack ']'", "+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'", "+ cat", "++ echo cell1", "+ for CELL in $(echo $RENAMED_CELLS)", "+ oc delete pvc mysql-db-openstack-cell1-galera-0 --ignore-not-found=true", "+ oc delete pvc persistence-rabbitmq-cell1-server-0 --ignore-not-found=true"], "stdout": "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage/namespace.yaml\nnamespace/crc-storage unchanged\ntimeout 500s bash -c \"while ! 
(oc get project.v1.project.openshift.io crc-storage); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\ncrc-storage Active\noc project crc-storage\nNow using project \"crc-storage\" on server \"https://api.crc.testing:6443\".\nbash scripts/cleanup-crc-pv.sh\npersistentvolumeclaim \"swift-swift-storage-0\" deleted\npersistentvolumeclaim \"glance-glance-default-internal-api-0\" deleted\npersistentvolumeclaim \"mysql-db-openstack-cell1-galera-0\" deleted\npersistentvolumeclaim \"glance-glance-default-external-api-0\" deleted\npersistentvolumeclaim \"mysql-db-openstack-galera-0\" deleted\npersistentvolume \"local-storage01-crc\" deleted\npersistentvolume \"local-storage02-crc\" deleted\npersistentvolume \"local-storage03-crc\" deleted\npersistentvolume \"local-storage04-crc\" deleted\npersistentvolume \"local-storage05-crc\" deleted\npersistentvolume \"local-storage06-crc\" deleted\npersistentvolume \"local-storage07-crc\" deleted\npersistentvolume \"local-storage08-crc\" deleted\npersistentvolume \"local-storage09-crc\" deleted\npersistentvolume \"local-storage10-crc\" deleted\npersistentvolume \"local-storage11-crc\" deleted\npersistentvolume \"local-storage12-crc\" deleted\nif oc get sc \"local-storage\"; then oc delete sc \"local-storage\"; fi\nNAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE\nlocal-storage kubernetes.io/no-provisioner Delete WaitForFirstConsumer true 68m\nstorageclass.storage.k8s.io \"local-storage\" deleted\nbash scripts/delete-pv.sh\nconfigmap/crc-storage unchanged\nserviceaccount/crc-storage unchanged\nrole.rbac.authorization.k8s.io/crc-storage-role unchanged\nrolebinding.rbac.authorization.k8s.io/crc-storage-rolebinding unchanged\njob.batch \"crc-storage-crc\" deleted\njob.batch/crc-storage-crc created\njob.batch/crc-storage-crc condition met\nbash scripts/create-pv.sh\nconfigmap/crc-storage unchanged\nserviceaccount/crc-storage unchanged\nrole.rbac.authorization.k8s.io/crc-storage-role unchanged\nrolebinding.rbac.authorization.k8s.io/crc-storage-rolebinding unchanged\njob.batch \"crc-storage-crc\" deleted\njob.batch/crc-storage-crc created\njob.batch/crc-storage-crc condition met\nbash scripts/gen-crc-pv-kustomize.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc/storage.yaml\nstorageclass.storage.k8s.io/local-storage created\npersistentvolume/local-storage01-crc created\npersistentvolume/local-storage02-crc created\npersistentvolume/local-storage03-crc created\npersistentvolume/local-storage04-crc created\npersistentvolume/local-storage05-crc created\npersistentvolume/local-storage06-crc created\npersistentvolume/local-storage07-crc created\npersistentvolume/local-storage08-crc created\npersistentvolume/local-storage09-crc created\npersistentvolume/local-storage10-crc created\npersistentvolume/local-storage11-crc created\npersistentvolume/local-storage12-crc created\npersistentvolumeclaim/ansible-ee-logs unchanged\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nmake[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml\nnamespace/openstack unchanged\ntimeout 500s bash -c \"while ! 
(oc get project.v1.project.openshift.io openstack); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\nopenstack Active\noc project openstack\nNow using project \"openstack\" on server \"https://api.crc.testing:6443\".\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "stdout_lines": ["make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "bash scripts/gen-namespace.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc-storage/namespace.yaml", "namespace/crc-storage unchanged", "timeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io crc-storage); do sleep 1; done\"", "NAME DISPLAY NAME STATUS", "crc-storage Active", "oc project crc-storage", "Now using project \"crc-storage\" on server \"https://api.crc.testing:6443\".", "bash scripts/cleanup-crc-pv.sh", "persistentvolumeclaim \"swift-swift-storage-0\" deleted", "persistentvolumeclaim \"glance-glance-default-internal-api-0\" deleted", "persistentvolumeclaim \"mysql-db-openstack-cell1-galera-0\" deleted", "persistentvolumeclaim \"glance-glance-default-external-api-0\" deleted", "persistentvolumeclaim \"mysql-db-openstack-galera-0\" deleted", "persistentvolume \"local-storage01-crc\" deleted", "persistentvolume \"local-storage02-crc\" deleted", "persistentvolume \"local-storage03-crc\" deleted", "persistentvolume \"local-storage04-crc\" deleted", "persistentvolume \"local-storage05-crc\" deleted", "persistentvolume \"local-storage06-crc\" deleted", "persistentvolume \"local-storage07-crc\" deleted", "persistentvolume \"local-storage08-crc\" deleted", "persistentvolume \"local-storage09-crc\" deleted", "persistentvolume \"local-storage10-crc\" deleted", "persistentvolume \"local-storage11-crc\" deleted", "persistentvolume \"local-storage12-crc\" deleted", "if oc get sc \"local-storage\"; then oc delete sc \"local-storage\"; fi", "NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE", "local-storage kubernetes.io/no-provisioner Delete WaitForFirstConsumer true 68m", "storageclass.storage.k8s.io \"local-storage\" deleted", "bash scripts/delete-pv.sh", "configmap/crc-storage unchanged", "serviceaccount/crc-storage unchanged", "role.rbac.authorization.k8s.io/crc-storage-role unchanged", "rolebinding.rbac.authorization.k8s.io/crc-storage-rolebinding unchanged", "job.batch \"crc-storage-crc\" deleted", "job.batch/crc-storage-crc created", "job.batch/crc-storage-crc condition met", "bash scripts/create-pv.sh", "configmap/crc-storage unchanged", "serviceaccount/crc-storage unchanged", "role.rbac.authorization.k8s.io/crc-storage-role unchanged", "rolebinding.rbac.authorization.k8s.io/crc-storage-rolebinding unchanged", "job.batch \"crc-storage-crc\" deleted", "job.batch/crc-storage-crc created", "job.batch/crc-storage-crc condition met", "bash scripts/gen-crc-pv-kustomize.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/crc/storage.yaml", "storageclass.storage.k8s.io/local-storage created", "persistentvolume/local-storage01-crc created", "persistentvolume/local-storage02-crc created", "persistentvolume/local-storage03-crc created", "persistentvolume/local-storage04-crc created", "persistentvolume/local-storage05-crc created", "persistentvolume/local-storage06-crc created", "persistentvolume/local-storage07-crc created", "persistentvolume/local-storage08-crc created", "persistentvolume/local-storage09-crc created", "persistentvolume/local-storage10-crc created", 
"persistentvolume/local-storage11-crc created", "persistentvolume/local-storage12-crc created", "persistentvolumeclaim/ansible-ee-logs unchanged", "make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "bash scripts/gen-namespace.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml", "namespace/openstack unchanged", "timeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"", "NAME DISPLAY NAME STATUS", "openstack Active", "oc project openstack", "Now using project \"openstack\" on server \"https://api.crc.testing:6443\".", "make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'"]} PLAY [Adoption] **************************************************************** TASK [development_environment : pre-launch test VM instance] ******************* changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nexport OPENSTACK_COMMAND=\"ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack\"\nexport EDPM_CONFIGURE_HUGEPAGES=false\nexport CINDER_VOLUME_BACKEND_CONFIGURED=false\nexport CINDER_BACKUP_BACKEND_CONFIGURED=false\nexport PING_TEST_VM=false\nset -e\n\nalias openstack=\"$OPENSTACK_COMMAND\"\n\nfunction wait_for_status() {\n local time=0\n local msg=\"Waiting for $2\"\n local status=\"${3:-available}\"\n local result\n while [ $time -le 30 ] ; do\n result=$(${BASH_ALIASES[openstack]} $1 -f json)\n echo $result | jq -r \".status\" | grep -q $status && break\n echo \"result=$result\"\n echo \"$msg\"\n time=$(( time + 5 ))\n sleep 5\n done\n}\n\nfunction create_volume_resources() {\n # create a data volume\n if ! ${BASH_ALIASES[openstack]} volume show disk ; then\n ${BASH_ALIASES[openstack]} volume create --image cirros --size 1 disk\n wait_for_status \"volume show disk\" \"test volume 'disk' creation\"\n fi\n\n # create volume snapshot\n if ! ${BASH_ALIASES[openstack]} volume snapshot show snapshot ; then\n ${BASH_ALIASES[openstack]} volume snapshot create --volume disk snapshot\n wait_for_status \"volume snapshot show snapshot\" \"test volume 'disk' snapshot availability\"\n fi\n\n # Add volume to the test VM\n if ${BASH_ALIASES[openstack]} volume show disk -f json | jq -r '.status' | grep -q available ; then\n ${BASH_ALIASES[openstack]} server add volume test disk\n fi\n}\n\nfunction create_backup_resources() {\n # create volume backup\n if ! ${BASH_ALIASES[openstack]} volume backup show backup; then\n ${BASH_ALIASES[openstack]} volume backup create --name backup disk --force\n wait_for_status \"volume backup show backup\" \"test volume 'disk' backup completion\"\n fi\n}\n\nfunction create_bfv_volume() {\n # Launch an instance from boot-volume (BFV)\n if ! 
${BASH_ALIASES[openstack]} volume show boot-volume ; then\n ${BASH_ALIASES[openstack]} volume create --image cirros --size 1 boot-volume\n wait_for_status \"volume show boot-volume\" \"test volume 'boot-volume' creation\"\n fi\n if ${BASH_ALIASES[openstack]} volume show boot-volume -f json | jq -r '.status' | grep -q available ; then\n ${BASH_ALIASES[openstack]} server create --flavor m1.small --volume boot-volume --nic net-id=private bfv-server --wait\n fi\n}\n\n# Create Image\nIMG=cirros-0.6.3-x86_64-disk.img\nURL=http://download.cirros-cloud.net/0.6.3/$IMG\nDISK_FORMAT=qcow2\nRAW=$IMG\ncurl -L -# $URL > /tmp/$IMG\nif type qemu-img >/dev/null 2>&1; then\n RAW=$(echo $IMG | sed s/img/raw/g)\n qemu-img convert -f qcow2 -O raw /tmp/$IMG /tmp/$RAW\n DISK_FORMAT=raw\nfi\n${BASH_ALIASES[openstack]} image show cirros || \\\n ${BASH_ALIASES[openstack]} image create --container-format bare --disk-format $DISK_FORMAT cirros < /tmp/$RAW\n\n# Create flavor\n${BASH_ALIASES[openstack]} flavor show m1.small || \\\n ${BASH_ALIASES[openstack]} flavor create --ram 512 --vcpus 1 --disk 1 --ephemeral 1 m1.small\nif [ \"${EDPM_CONFIGURE_HUGEPAGES:-false}\" = \"true\" ] ; then\n ${BASH_ALIASES[openstack]} flavor set m1.small --property hw:mem_page_size=2MB\nfi\n\n# Create networks\n${BASH_ALIASES[openstack]} network show private || ${BASH_ALIASES[openstack]} network create private --share\n${BASH_ALIASES[openstack]} subnet show priv_sub || ${BASH_ALIASES[openstack]} subnet create priv_sub --subnet-range 192.168.0.0/24 --network private\n${BASH_ALIASES[openstack]} network show public || ${BASH_ALIASES[openstack]} network create public --external --provider-network-type flat --provider-physical-network datacentre\n${BASH_ALIASES[openstack]} subnet show public_subnet || \\\n ${BASH_ALIASES[openstack]} subnet create public_subnet --subnet-range 192.168.122.0/24 --allocation-pool start=192.168.122.171,end=192.168.122.250 --gateway 192.168.122.1 --dhcp --network public\n${BASH_ALIASES[openstack]} router show priv_router || {\n ${BASH_ALIASES[openstack]} router create priv_router\n ${BASH_ALIASES[openstack]} router add subnet priv_router priv_sub\n ${BASH_ALIASES[openstack]} router set priv_router --external-gateway public\n}\n\n# Create a floating IP\n${BASH_ALIASES[openstack]} floating ip show 192.168.122.20 || \\\n ${BASH_ALIASES[openstack]} floating ip create public --floating-ip-address 192.168.122.20\n\n# Create a test instance\n${BASH_ALIASES[openstack]} server show test || {\n ${BASH_ALIASES[openstack]} server create --flavor m1.small --image cirros --nic net-id=private test --wait\n ${BASH_ALIASES[openstack]} server add floating ip test 192.168.122.20\n}\n\nif [ \"$PING_TEST_VM\" = \"true\" ]; then\n # Create a floating IP\n ${BASH_ALIASES[openstack]} floating ip show 192.168.122.21 || \\\n ${BASH_ALIASES[openstack]} floating ip create public --floating-ip-address 192.168.122.21\n\n # Create a test-ping instance\n ${BASH_ALIASES[openstack]} server show test-ping || {\n ${BASH_ALIASES[openstack]} server create --flavor m1.small --image cirros --nic net-id=private test-ping --wait\n ${BASH_ALIASES[openstack]} server add floating ip test-ping 192.168.122.21\n }\nfi\n\n# Create security groups\n${BASH_ALIASES[openstack]} security group rule list --protocol icmp --ingress -f json | grep -q '\"IP Range\": \"0.0.0.0/0\"' || \\\n ${BASH_ALIASES[openstack]} security group rule create --protocol icmp --ingress --icmp-type -1 $(${BASH_ALIASES[openstack]} security group list --project admin -f value -c 
ID)\n${BASH_ALIASES[openstack]} security group rule list --protocol tcp --ingress -f json | grep '\"Port Range\": \"22:22\"' || \\\n ${BASH_ALIASES[openstack]} security group rule create --protocol tcp --ingress --dst-port 22 $(${BASH_ALIASES[openstack]} security group list --project admin -f value -c ID)\n\nexport FIP=192.168.122.20\n# check connectivity via FIP\nTRIES=0\nuntil ping -D -c1 -W2 \"$FIP\"; do\n ((TRIES++)) || true\n if [ \"$TRIES\" -gt 20 ]; then\n echo \"Ping timeout\"\n exit 1\n fi\ndone\n\nif [ \"$CINDER_VOLUME_BACKEND_CONFIGURED\" = \"true\" ]; then\n create_volume_resources\n create_bfv_volume\nfi\n\nif [ \"$CINDER_BACKUP_BACKEND_CONFIGURED\" = \"true\" ]; then\n create_backup_resources\nfi\n", "delta": "0:01:36.353665", "end": "2025-10-06 14:59:51.105253", "msg": "", "rc": 0, "start": "2025-10-06 14:58:14.751588", "stderr": "+ export 'OPENSTACK_COMMAND=ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'\n+ OPENSTACK_COMMAND='ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'\n+ export EDPM_CONFIGURE_HUGEPAGES=false\n+ EDPM_CONFIGURE_HUGEPAGES=false\n+ export CINDER_VOLUME_BACKEND_CONFIGURED=false\n+ CINDER_VOLUME_BACKEND_CONFIGURED=false\n+ export CINDER_BACKUP_BACKEND_CONFIGURED=false\n+ CINDER_BACKUP_BACKEND_CONFIGURED=false\n+ export PING_TEST_VM=false\n+ PING_TEST_VM=false\n+ set -e\n+ alias 'openstack=ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'\n+ IMG=cirros-0.6.3-x86_64-disk.img\n+ URL=http://download.cirros-cloud.net/0.6.3/cirros-0.6.3-x86_64-disk.img\n+ DISK_FORMAT=qcow2\n+ RAW=cirros-0.6.3-x86_64-disk.img\n+ curl -L -# http://download.cirros-cloud.net/0.6.3/cirros-0.6.3-x86_64-disk.img\n#=#=# \r\r######################################################################## 100.0%##O#-# \r\r############################## 42.7%\r######################################################################## 100.0%\n+ type qemu-img\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack image show cirros\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nNo Image found for cirros\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack image create --container-format bare --disk-format qcow2 cirros\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack flavor show m1.small\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nNo Flavor found for m1.small\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack flavor create --ram 512 --vcpus 1 --disk 1 --ephemeral 1 m1.small\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ '[' false = true ']'\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no 
root@192.168.122.100 OS_CLOUD=standalone openstack network show private\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nError while executing command: No Network found for private\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network create private --share\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet show priv_sub\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nNo Subnet found for priv_sub\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet create priv_sub --subnet-range 192.168.0.0/24 --network private\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network show public\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nError while executing command: No Network found for public\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network create public --external --provider-network-type flat --provider-physical-network datacentre\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet show public_subnet\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nNo Subnet found for public_subnet\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet create public_subnet --subnet-range 192.168.122.0/24 --allocation-pool start=192.168.122.171,end=192.168.122.250 --gateway 192.168.122.1 --dhcp --network public\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router show priv_router\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nNo Router found for priv_router\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router create priv_router\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router add subnet priv_router priv_sub\nWarning: Identity file 
/home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router set priv_router --external-gateway public\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack floating ip show 192.168.122.20\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nError while executing command: No FloatingIP found for 192.168.122.20\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack floating ip create public --floating-ip-address 192.168.122.20\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server show test\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\nNo server with a name or ID of 'test' exists.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server create --flavor m1.small --image cirros --nic net-id=private test --wait\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server add floating ip test 192.168.122.20\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ '[' false = true ']'\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule list --protocol icmp --ingress -f json\n+ grep -q '\"IP Range\": \"0.0.0.0/0\"'\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group list --project admin -f value -c ID\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule create --protocol icmp --ingress --icmp-type -1 1ff9793e-0b34-42fb-96a4-ae5d694ff985\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ grep '\"Port Range\": \"22:22\"'\n+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule list --protocol tcp --ingress -f json\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.\n+ 
export FIP=192.168.122.20\n+ FIP=192.168.122.20\n+ TRIES=0\n+ ping -D -c1 -W2 192.168.122.20\n+ (( TRIES++ ))\n+ true\n+ '[' 1 -gt 20 ']'\n+ ping -D -c1 -W2 192.168.122.20\n+ '[' false = true ']'\n+ '[' false = true ']'", "stderr_lines": ["+ export 'OPENSTACK_COMMAND=ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'", "+ OPENSTACK_COMMAND='ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'", "+ export EDPM_CONFIGURE_HUGEPAGES=false", "+ EDPM_CONFIGURE_HUGEPAGES=false", "+ export CINDER_VOLUME_BACKEND_CONFIGURED=false", "+ CINDER_VOLUME_BACKEND_CONFIGURED=false", "+ export CINDER_BACKUP_BACKEND_CONFIGURED=false", "+ CINDER_BACKUP_BACKEND_CONFIGURED=false", "+ export PING_TEST_VM=false", "+ PING_TEST_VM=false", "+ set -e", "+ alias 'openstack=ssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack'", "+ IMG=cirros-0.6.3-x86_64-disk.img", "+ URL=http://download.cirros-cloud.net/0.6.3/cirros-0.6.3-x86_64-disk.img", "+ DISK_FORMAT=qcow2", "+ RAW=cirros-0.6.3-x86_64-disk.img", "+ curl -L -# http://download.cirros-cloud.net/0.6.3/cirros-0.6.3-x86_64-disk.img", "#=#=# ", "", "######################################################################## 100.0%##O#-# ", "", "############################## 42.7%", "######################################################################## 100.0%", "+ type qemu-img", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack image show cirros", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "No Image found for cirros", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack image create --container-format bare --disk-format qcow2 cirros", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack flavor show m1.small", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "No Flavor found for m1.small", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack flavor create --ram 512 --vcpus 1 --disk 1 --ephemeral 1 m1.small", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ '[' false = true ']'", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network show private", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "Error while executing command: No Network found for private", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network create private --share", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file 
or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet show priv_sub", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "No Subnet found for priv_sub", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet create priv_sub --subnet-range 192.168.0.0/24 --network private", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network show public", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "Error while executing command: No Network found for public", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack network create public --external --provider-network-type flat --provider-physical-network datacentre", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet show public_subnet", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "No Subnet found for public_subnet", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack subnet create public_subnet --subnet-range 192.168.122.0/24 --allocation-pool start=192.168.122.171,end=192.168.122.250 --gateway 192.168.122.1 --dhcp --network public", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router show priv_router", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "No Router found for priv_router", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router create priv_router", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router add subnet priv_router priv_sub", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack router set priv_router --external-gateway public", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 
OS_CLOUD=standalone openstack floating ip show 192.168.122.20", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "Error while executing command: No FloatingIP found for 192.168.122.20", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack floating ip create public --floating-ip-address 192.168.122.20", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server show test", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "No server with a name or ID of 'test' exists.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server create --flavor m1.small --image cirros --nic net-id=private test --wait", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack server add floating ip test 192.168.122.20", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ '[' false = true ']'", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule list --protocol icmp --ingress -f json", "+ grep -q '\"IP Range\": \"0.0.0.0/0\"'", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "++ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group list --project admin -f value -c ID", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule create --protocol icmp --ingress --icmp-type -1 1ff9793e-0b34-42fb-96a4-ae5d694ff985", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ grep '\"Port Range\": \"22:22\"'", "+ ssh -i '~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa' -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack security group rule list --protocol tcp --ingress -f json", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "+ export FIP=192.168.122.20", "+ FIP=192.168.122.20", "+ TRIES=0", "+ ping -D -c1 -W2 192.168.122.20", "+ (( TRIES++ ))", "+ true", "+ '[' 1 -gt 20 ']'", "+ ping -D -c1 -W2 192.168.122.20", "+ '[' false = true ']'", "+ '[' false = true ']'"], "stdout": "+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+\n| Field | Value 
|\n+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+\n| container_format | bare |\n| created_at | 2025-10-06T14:58:20Z |\n| disk_format | qcow2 |\n| file | /v2/images/0bfff2e5-812f-4322-b5dd-d2eb886ac5f7/file |\n| id | 0bfff2e5-812f-4322-b5dd-d2eb886ac5f7 |\n| min_disk | 0 |\n| min_ram | 0 |\n| name | cirros |\n| owner | f97174449bc6492a90138a7ecbd64dbe |\n| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' |\n| protected | False |\n| schema | /v2/schemas/image |\n| status | queued |\n| tags | |\n| updated_at | 2025-10-06T14:58:20Z |\n| visibility | shared |\n+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+\n+----------------------------+--------------------------------------+\n| Field | Value |\n+----------------------------+--------------------------------------+\n| OS-FLV-DISABLED:disabled | False |\n| OS-FLV-EXT-DATA:ephemeral | 1 |\n| description | None |\n| disk | 1 |\n| id | 07a52d90-f051-45f6-a49c-98f19eaeec7b |\n| name | m1.small |\n| os-flavor-access:is_public | True |\n| properties | |\n| ram | 512 |\n| rxtx_factor | 1.0 |\n| swap | |\n| vcpus | 1 |\n+----------------------------+--------------------------------------+\n+---------------------------+--------------------------------------+\n| Field | Value |\n+---------------------------+--------------------------------------+\n| admin_state_up | UP |\n| availability_zone_hints | |\n| availability_zones | |\n| created_at | 2025-10-06T14:58:34Z |\n| description | |\n| dns_domain | |\n| id | 764282d1-88ad-48a1-b206-80bbea72a34f |\n| ipv4_address_scope | None |\n| ipv6_address_scope | None |\n| is_default | False |\n| is_vlan_transparent | None |\n| mtu | 1442 |\n| name | private |\n| port_security_enabled | True |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| provider:network_type | geneve |\n| provider:physical_network | None |\n| provider:segmentation_id | 44651 |\n| qos_policy_id | None |\n| revision_number | 1 |\n| router:external | Internal |\n| segments | None |\n| shared | True |\n| status | ACTIVE |\n| subnets | |\n| tags | |\n| updated_at | 2025-10-06T14:58:34Z |\n+---------------------------+--------------------------------------+\n+----------------------+--------------------------------------+\n| Field | Value |\n+----------------------+--------------------------------------+\n| allocation_pools | 192.168.0.2-192.168.0.254 |\n| cidr | 192.168.0.0/24 |\n| created_at | 2025-10-06T14:58:40Z |\n| description | |\n| dns_nameservers | |\n| dns_publish_fixed_ip | None |\n| enable_dhcp | True |\n| gateway_ip | 192.168.0.1 |\n| host_routes | |\n| id | 5f29e9b6-fb0d-49ec-a852-10703b72f435 |\n| ip_version | 4 |\n| ipv6_address_mode | None |\n| ipv6_ra_mode | None |\n| name | priv_sub |\n| network_id | 764282d1-88ad-48a1-b206-80bbea72a34f |\n| prefix_length | None |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| revision_number | 0 |\n| segment_id | None |\n| service_types | |\n| subnetpool_id | None |\n| tags | |\n| updated_at | 2025-10-06T14:58:40Z |\n+----------------------+--------------------------------------+\n+---------------------------+--------------------------------------+\n| Field | Value |\n+---------------------------+--------------------------------------+\n| admin_state_up | UP 
|\n| availability_zone_hints | |\n| availability_zones | |\n| created_at | 2025-10-06T14:58:46Z |\n| description | |\n| dns_domain | |\n| id | 21266228-6569-4e70-90b3-d960c402bd06 |\n| ipv4_address_scope | None |\n| ipv6_address_scope | None |\n| is_default | False |\n| is_vlan_transparent | None |\n| mtu | 1500 |\n| name | public |\n| port_security_enabled | True |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| provider:network_type | flat |\n| provider:physical_network | datacentre |\n| provider:segmentation_id | None |\n| qos_policy_id | None |\n| revision_number | 1 |\n| router:external | External |\n| segments | None |\n| shared | False |\n| status | ACTIVE |\n| subnets | |\n| tags | |\n| updated_at | 2025-10-06T14:58:46Z |\n+---------------------------+--------------------------------------+\n+----------------------+--------------------------------------+\n| Field | Value |\n+----------------------+--------------------------------------+\n| allocation_pools | 192.168.122.171-192.168.122.250 |\n| cidr | 192.168.122.0/24 |\n| created_at | 2025-10-06T14:58:51Z |\n| description | |\n| dns_nameservers | |\n| dns_publish_fixed_ip | None |\n| enable_dhcp | True |\n| gateway_ip | 192.168.122.1 |\n| host_routes | |\n| id | cafaaeb9-1ec3-418d-84a1-e72a216d40c2 |\n| ip_version | 4 |\n| ipv6_address_mode | None |\n| ipv6_ra_mode | None |\n| name | public_subnet |\n| network_id | 21266228-6569-4e70-90b3-d960c402bd06 |\n| prefix_length | None |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| revision_number | 0 |\n| segment_id | None |\n| service_types | |\n| subnetpool_id | None |\n| tags | |\n| updated_at | 2025-10-06T14:58:51Z |\n+----------------------+--------------------------------------+\n+-------------------------+--------------------------------------+\n| Field | Value |\n+-------------------------+--------------------------------------+\n| admin_state_up | UP |\n| availability_zone_hints | |\n| availability_zones | |\n| created_at | 2025-10-06T14:58:57Z |\n| description | |\n| external_gateway_info | null |\n| flavor_id | None |\n| id | 001dcbf7-44f3-46e1-b674-9541f3df5199 |\n| name | priv_router |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| revision_number | 1 |\n| routes | |\n| status | ACTIVE |\n| tags | |\n| updated_at | 2025-10-06T14:58:57Z |\n+-------------------------+--------------------------------------+\n+---------------------+--------------------------------------+\n| Field | Value |\n+---------------------+--------------------------------------+\n| created_at | 2025-10-06T14:59:14Z |\n| description | |\n| dns_domain | |\n| dns_name | |\n| fixed_ip_address | None |\n| floating_ip_address | 192.168.122.20 |\n| floating_network_id | 21266228-6569-4e70-90b3-d960c402bd06 |\n| id | 4a07c9ac-1df3-49a9-ae04-7aeb0fc104f7 |\n| name | 192.168.122.20 |\n| port_details | None |\n| port_id | None |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| qos_policy_id | None |\n| revision_number | 0 |\n| router_id | None |\n| status | DOWN |\n| subnet_id | None |\n| tags | [] |\n| updated_at | 2025-10-06T14:59:14Z |\n+---------------------+--------------------------------------+\n\n+-------------------------------------+----------------------------------------------------------+\n| Field | Value |\n+-------------------------------------+----------------------------------------------------------+\n| OS-DCF:diskConfig | MANUAL |\n| OS-EXT-AZ:availability_zone | nova |\n| OS-EXT-SRV-ATTR:host | standalone.ooo.test |\n| OS-EXT-SRV-ATTR:hypervisor_hostname | 
standalone.ooo.test |\n| OS-EXT-SRV-ATTR:instance_name | instance-00000001 |\n| OS-EXT-STS:power_state | Running |\n| OS-EXT-STS:task_state | None |\n| OS-EXT-STS:vm_state | active |\n| OS-SRV-USG:launched_at | 2025-10-06T14:59:30.000000 |\n| OS-SRV-USG:terminated_at | None |\n| accessIPv4 | |\n| accessIPv6 | |\n| addresses | private=192.168.0.148 |\n| adminPass | s37Pn3CXwmqt |\n| config_drive | |\n| created | 2025-10-06T14:59:22Z |\n| flavor | m1.small (07a52d90-f051-45f6-a49c-98f19eaeec7b) |\n| hostId | a0f13d3f2f4e309c2d7407625d539ed87a6339ccd4f8a25a059cddfa |\n| id | ddf4de27-d091-4f06-b446-d332da735efa |\n| image | cirros (0bfff2e5-812f-4322-b5dd-d2eb886ac5f7) |\n| key_name | None |\n| name | test |\n| progress | 0 |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| properties | |\n| security_groups | name='default' |\n| status | ACTIVE |\n| updated | 2025-10-06T14:59:30Z |\n| user_id | defef0301f934088b3da883792a6262c |\n| volumes_attached | |\n+-------------------------------------+----------------------------------------------------------+\n+-------------------------+--------------------------------------+\n| Field | Value |\n+-------------------------+--------------------------------------+\n| created_at | 2025-10-06T14:59:45Z |\n| description | |\n| direction | ingress |\n| ether_type | IPv4 |\n| id | 830cfa95-d75d-4a60-b1e1-ff251bcd3e8b |\n| name | None |\n| port_range_max | None |\n| port_range_min | None |\n| project_id | f97174449bc6492a90138a7ecbd64dbe |\n| protocol | icmp |\n| remote_address_group_id | None |\n| remote_group_id | None |\n| remote_ip_prefix | 0.0.0.0/0 |\n| revision_number | 0 |\n| security_group_id | 1ff9793e-0b34-42fb-96a4-ae5d694ff985 |\n| tags | [] |\n| updated_at | 2025-10-06T14:59:45Z |\n+-------------------------+--------------------------------------+\n \"Port Range\": \"22:22\",\nPING 192.168.122.20 (192.168.122.20) 56(84) bytes of data.\n\n--- 192.168.122.20 ping statistics ---\n1 packets transmitted, 0 received, 100% packet loss, time 0ms\n\nPING 192.168.122.20 (192.168.122.20) 56(84) bytes of data.\n[1759762791.103992] 64 bytes from 192.168.122.20: icmp_seq=1 ttl=63 time=11.9 ms\n\n--- 192.168.122.20 ping statistics ---\n1 packets transmitted, 1 received, 0% packet loss, time 0ms\nrtt min/avg/max/mdev = 11.866/11.866/11.866/0.000 ms", "stdout_lines": ["+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+", "| Field | Value |", "+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+", "| container_format | bare |", "| created_at | 2025-10-06T14:58:20Z |", "| disk_format | qcow2 |", "| file | /v2/images/0bfff2e5-812f-4322-b5dd-d2eb886ac5f7/file |", "| id | 0bfff2e5-812f-4322-b5dd-d2eb886ac5f7 |", "| min_disk | 0 |", "| min_ram | 0 |", "| name | cirros |", "| owner | f97174449bc6492a90138a7ecbd64dbe |", "| properties | os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/cirros', owner_specified.openstack.sha256='' |", "| protected | False |", "| schema | /v2/schemas/image |", "| status | queued |", "| tags | |", "| updated_at | 2025-10-06T14:58:20Z |", "| visibility | shared |", "+------------------+--------------------------------------------------------------------------------------------------------------------------------------------+", 
"+----------------------------+--------------------------------------+", "| Field | Value |", "+----------------------------+--------------------------------------+", "| OS-FLV-DISABLED:disabled | False |", "| OS-FLV-EXT-DATA:ephemeral | 1 |", "| description | None |", "| disk | 1 |", "| id | 07a52d90-f051-45f6-a49c-98f19eaeec7b |", "| name | m1.small |", "| os-flavor-access:is_public | True |", "| properties | |", "| ram | 512 |", "| rxtx_factor | 1.0 |", "| swap | |", "| vcpus | 1 |", "+----------------------------+--------------------------------------+", "+---------------------------+--------------------------------------+", "| Field | Value |", "+---------------------------+--------------------------------------+", "| admin_state_up | UP |", "| availability_zone_hints | |", "| availability_zones | |", "| created_at | 2025-10-06T14:58:34Z |", "| description | |", "| dns_domain | |", "| id | 764282d1-88ad-48a1-b206-80bbea72a34f |", "| ipv4_address_scope | None |", "| ipv6_address_scope | None |", "| is_default | False |", "| is_vlan_transparent | None |", "| mtu | 1442 |", "| name | private |", "| port_security_enabled | True |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| provider:network_type | geneve |", "| provider:physical_network | None |", "| provider:segmentation_id | 44651 |", "| qos_policy_id | None |", "| revision_number | 1 |", "| router:external | Internal |", "| segments | None |", "| shared | True |", "| status | ACTIVE |", "| subnets | |", "| tags | |", "| updated_at | 2025-10-06T14:58:34Z |", "+---------------------------+--------------------------------------+", "+----------------------+--------------------------------------+", "| Field | Value |", "+----------------------+--------------------------------------+", "| allocation_pools | 192.168.0.2-192.168.0.254 |", "| cidr | 192.168.0.0/24 |", "| created_at | 2025-10-06T14:58:40Z |", "| description | |", "| dns_nameservers | |", "| dns_publish_fixed_ip | None |", "| enable_dhcp | True |", "| gateway_ip | 192.168.0.1 |", "| host_routes | |", "| id | 5f29e9b6-fb0d-49ec-a852-10703b72f435 |", "| ip_version | 4 |", "| ipv6_address_mode | None |", "| ipv6_ra_mode | None |", "| name | priv_sub |", "| network_id | 764282d1-88ad-48a1-b206-80bbea72a34f |", "| prefix_length | None |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| revision_number | 0 |", "| segment_id | None |", "| service_types | |", "| subnetpool_id | None |", "| tags | |", "| updated_at | 2025-10-06T14:58:40Z |", "+----------------------+--------------------------------------+", "+---------------------------+--------------------------------------+", "| Field | Value |", "+---------------------------+--------------------------------------+", "| admin_state_up | UP |", "| availability_zone_hints | |", "| availability_zones | |", "| created_at | 2025-10-06T14:58:46Z |", "| description | |", "| dns_domain | |", "| id | 21266228-6569-4e70-90b3-d960c402bd06 |", "| ipv4_address_scope | None |", "| ipv6_address_scope | None |", "| is_default | False |", "| is_vlan_transparent | None |", "| mtu | 1500 |", "| name | public |", "| port_security_enabled | True |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| provider:network_type | flat |", "| provider:physical_network | datacentre |", "| provider:segmentation_id | None |", "| qos_policy_id | None |", "| revision_number | 1 |", "| router:external | External |", "| segments | None |", "| shared | False |", "| status | ACTIVE |", "| subnets | |", "| tags | |", "| updated_at | 
2025-10-06T14:58:46Z |", "+---------------------------+--------------------------------------+", "+----------------------+--------------------------------------+", "| Field | Value |", "+----------------------+--------------------------------------+", "| allocation_pools | 192.168.122.171-192.168.122.250 |", "| cidr | 192.168.122.0/24 |", "| created_at | 2025-10-06T14:58:51Z |", "| description | |", "| dns_nameservers | |", "| dns_publish_fixed_ip | None |", "| enable_dhcp | True |", "| gateway_ip | 192.168.122.1 |", "| host_routes | |", "| id | cafaaeb9-1ec3-418d-84a1-e72a216d40c2 |", "| ip_version | 4 |", "| ipv6_address_mode | None |", "| ipv6_ra_mode | None |", "| name | public_subnet |", "| network_id | 21266228-6569-4e70-90b3-d960c402bd06 |", "| prefix_length | None |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| revision_number | 0 |", "| segment_id | None |", "| service_types | |", "| subnetpool_id | None |", "| tags | |", "| updated_at | 2025-10-06T14:58:51Z |", "+----------------------+--------------------------------------+", "+-------------------------+--------------------------------------+", "| Field | Value |", "+-------------------------+--------------------------------------+", "| admin_state_up | UP |", "| availability_zone_hints | |", "| availability_zones | |", "| created_at | 2025-10-06T14:58:57Z |", "| description | |", "| external_gateway_info | null |", "| flavor_id | None |", "| id | 001dcbf7-44f3-46e1-b674-9541f3df5199 |", "| name | priv_router |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| revision_number | 1 |", "| routes | |", "| status | ACTIVE |", "| tags | |", "| updated_at | 2025-10-06T14:58:57Z |", "+-------------------------+--------------------------------------+", "+---------------------+--------------------------------------+", "| Field | Value |", "+---------------------+--------------------------------------+", "| created_at | 2025-10-06T14:59:14Z |", "| description | |", "| dns_domain | |", "| dns_name | |", "| fixed_ip_address | None |", "| floating_ip_address | 192.168.122.20 |", "| floating_network_id | 21266228-6569-4e70-90b3-d960c402bd06 |", "| id | 4a07c9ac-1df3-49a9-ae04-7aeb0fc104f7 |", "| name | 192.168.122.20 |", "| port_details | None |", "| port_id | None |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| qos_policy_id | None |", "| revision_number | 0 |", "| router_id | None |", "| status | DOWN |", "| subnet_id | None |", "| tags | [] |", "| updated_at | 2025-10-06T14:59:14Z |", "+---------------------+--------------------------------------+", "", "+-------------------------------------+----------------------------------------------------------+", "| Field | Value |", "+-------------------------------------+----------------------------------------------------------+", "| OS-DCF:diskConfig | MANUAL |", "| OS-EXT-AZ:availability_zone | nova |", "| OS-EXT-SRV-ATTR:host | standalone.ooo.test |", "| OS-EXT-SRV-ATTR:hypervisor_hostname | standalone.ooo.test |", "| OS-EXT-SRV-ATTR:instance_name | instance-00000001 |", "| OS-EXT-STS:power_state | Running |", "| OS-EXT-STS:task_state | None |", "| OS-EXT-STS:vm_state | active |", "| OS-SRV-USG:launched_at | 2025-10-06T14:59:30.000000 |", "| OS-SRV-USG:terminated_at | None |", "| accessIPv4 | |", "| accessIPv6 | |", "| addresses | private=192.168.0.148 |", "| adminPass | s37Pn3CXwmqt |", "| config_drive | |", "| created | 2025-10-06T14:59:22Z |", "| flavor | m1.small (07a52d90-f051-45f6-a49c-98f19eaeec7b) |", "| hostId | 
a0f13d3f2f4e309c2d7407625d539ed87a6339ccd4f8a25a059cddfa |", "| id | ddf4de27-d091-4f06-b446-d332da735efa |", "| image | cirros (0bfff2e5-812f-4322-b5dd-d2eb886ac5f7) |", "| key_name | None |", "| name | test |", "| progress | 0 |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| properties | |", "| security_groups | name='default' |", "| status | ACTIVE |", "| updated | 2025-10-06T14:59:30Z |", "| user_id | defef0301f934088b3da883792a6262c |", "| volumes_attached | |", "+-------------------------------------+----------------------------------------------------------+", "+-------------------------+--------------------------------------+", "| Field | Value |", "+-------------------------+--------------------------------------+", "| created_at | 2025-10-06T14:59:45Z |", "| description | |", "| direction | ingress |", "| ether_type | IPv4 |", "| id | 830cfa95-d75d-4a60-b1e1-ff251bcd3e8b |", "| name | None |", "| port_range_max | None |", "| port_range_min | None |", "| project_id | f97174449bc6492a90138a7ecbd64dbe |", "| protocol | icmp |", "| remote_address_group_id | None |", "| remote_group_id | None |", "| remote_ip_prefix | 0.0.0.0/0 |", "| revision_number | 0 |", "| security_group_id | 1ff9793e-0b34-42fb-96a4-ae5d694ff985 |", "| tags | [] |", "| updated_at | 2025-10-06T14:59:45Z |", "+-------------------------+--------------------------------------+", " \"Port Range\": \"22:22\",", "PING 192.168.122.20 (192.168.122.20) 56(84) bytes of data.", "", "--- 192.168.122.20 ping statistics ---", "1 packets transmitted, 0 received, 100% packet loss, time 0ms", "", "PING 192.168.122.20 (192.168.122.20) 56(84) bytes of data.", "[1759762791.103992] 64 bytes from 192.168.122.20: icmp_seq=1 ttl=63 time=11.9 ms", "", "--- 192.168.122.20 ping statistics ---", "1 packets transmitted, 1 received, 0% packet loss, time 0ms", "rtt min/avg/max/mdev = 11.866/11.866/11.866/0.000 ms"]} TASK [development_environment : pre-launch test Ironic instance] *************** skipping: [localhost] => {"changed": false, "false_condition": "'pre_launch_ironic.bash' in prelaunch_test_instance_scripts", "skip_reason": "Conditional result was False"} TASK [development_environment : Start the ping test to the VM instance.] ******* skipping: [localhost] => {"changed": false, "false_condition": "ping_test|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Create stop l3 agent connectivity check scripts.] *** skipping: [localhost] => {"changed": false, "false_condition": "ping_test|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Remember that the ping test is running.] 
******* skipping: [localhost] => {"changed": false, "false_condition": "ping_test|bool", "skip_reason": "Conditional result was False"} TASK [development_environment : creates Barbican secret] *********************** skipping: [localhost] => {"changed": false, "false_condition": "prelaunch_test_instance|bool and prelaunch_barbican_secret|default(false)", "skip_reason": "Conditional result was False"} TASK [development_environment : Issue session fernet token] ******************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\n", "delta": "0:00:02.388136", "end": "2025-10-06 14:59:54.069142", "msg": "", "rc": 0, "start": "2025-10-06 14:59:51.681006", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "stderr_lines": ["+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory."], "stdout": "gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg", "stdout_lines": ["gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg"]} TASK [development_environment : Create credential for sanity checking its value after adoption] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack credential create admin test -f value -c id\n", "delta": "0:00:02.758297", "end": "2025-10-06 14:59:57.128958", "msg": "", "rc": 0, "start": "2025-10-06 14:59:54.370661", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack credential create admin test -f value -c id\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "stderr_lines": ["+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack credential create admin test -f value -c id", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory."], "stdout": "58e2321473e34aa88fe9cd869f4cde62", "stdout_lines": ["58e2321473e34aa88fe9cd869f4cde62"]} TASK [development_environment : execute create resources script] *************** skipping: [localhost] => {"changed": false, "false_condition": "neutron_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : execute prepare-pinger script] ***************** skipping: [localhost] => {"changed": false, "false_condition": "neutron_qe_test | default('false') | 
bool", "skip_reason": "Conditional result was False"} TASK [development_environment : execute start-pinger script] ******************* skipping: [localhost] => {"changed": false, "false_condition": "neutron_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : tobiko installation] *************************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : oc undercloud installation] ******************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : copy kube conf to undercloud] ****************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : upload tobiko-playbook.yaml to the undercloud] *** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Add tobiko.conf to the undercloud] ************* skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : Run Tobiko from the undercloud] **************** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [development_environment : copy keys from undercloud for tobiko] ********** skipping: [localhost] => {"changed": false, "false_condition": "tobiko_qe_test | default('false') | bool", "skip_reason": "Conditional result was False"} TASK [tls_adoption : Create Certificate Issuer with cert and key from IPA] ***** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nIPA_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 podman exec -ti freeipa-server-container\"\n$IPA_SSH pk12util -o /tmp/freeipa.p12 -n 'caSigningCert\\ cert-pki-ca' -d /etc/pki/pki-tomcat/alias -k /etc/pki/pki-tomcat/alias/pwdfile.txt -w /etc/pki/pki-tomcat/alias/pwdfile.txt\n\noc create secret generic rootca-internal\n\noc patch secret rootca-internal -p=\"{\\\"data\\\":{\\\"ca.crt\\\": \\\"`$IPA_SSH openssl pkcs12 -in /tmp/freeipa.p12 -passin file:/etc/pki/pki-tomcat/alias/pwdfile.txt -nokeys | openssl x509 | base64 -w 0`\\\"}}\"\n\noc patch secret rootca-internal -p=\"{\\\"data\\\":{\\\"tls.crt\\\": \\\"`$IPA_SSH openssl pkcs12 -in /tmp/freeipa.p12 -passin file:/etc/pki/pki-tomcat/alias/pwdfile.txt -nokeys | openssl x509 | base64 -w 0`\\\"}}\"\n\noc patch secret rootca-internal -p=\"{\\\"data\\\":{\\\"tls.key\\\": \\\"`$IPA_SSH openssl pkcs12 -in /tmp/freeipa.p12 -passin file:/etc/pki/pki-tomcat/alias/pwdfile.txt -nocerts -noenc | openssl rsa | base64 -w 0`\\\"}}\"\n\noc apply -f - < {"changed": true, "cmd": "set -euxo pipefail\n\n\ncd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\nmake input\n", "delta": "0:00:01.112600", "end": "2025-10-06 15:00:03.322630", "msg": "", "rc": 0, "start": "2025-10-06 15:00:02.210030", "stderr": "+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/\n+ make input\n+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'\n+ '[' -z 
openstack ']'\n+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'\n+ cat\n+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out\n+ '[' -z openstack ']'\n+ '[' -z osp-secret ']'\n+ '[' -z 12345678 ']'\n+ '[' -z 1234567842 ']'\n+ '[' -z 767c3ed056cbaa3b9dfedb8c6f825bf0 ']'\n+ '[' -z sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= ']'\n+ '[' -z COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f ']'\n+ '[' -z openstack ']'\n+ '[' -z libvirt-secret ']'\n+ DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input\n+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ']'\n+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input\n+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input\n+ cat\nError from server (NotFound): secrets \"osp-secret\" not found", "stderr_lines": ["+ cd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/", "+ make input", "+ '[' -z /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out ']'", "+ '[' -z openstack ']'", "+ OUT_DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack ']'", "+ cat", "+ OUT=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out", "+ '[' -z openstack ']'", "+ '[' -z osp-secret ']'", "+ '[' -z 12345678 ']'", "+ '[' -z 1234567842 ']'", "+ '[' -z 767c3ed056cbaa3b9dfedb8c6f825bf0 ']'", "+ '[' -z sEFmdFjDUqRM2VemYslV5yGNWjokioJXsg8Nrlc3drU= ']'", "+ '[' -z COX8bmlKAWn56XCGMrKQJj7dgHNAOl6f ']'", "+ '[' -z openstack ']'", "+ '[' -z libvirt-secret ']'", "+ DIR=/home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input", "+ '[' '!' -d /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ']'", "+ mkdir -p /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input", "+ pushd /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input", "+ cat", "Error from server (NotFound): secrets \"osp-secret\" not found"], "stdout": "make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'\nbash scripts/gen-namespace.sh\noc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml\nnamespace/openstack unchanged\ntimeout 500s bash -c \"while ! 
(oc get project.v1.project.openshift.io openstack); do sleep 1; done\"\nNAME DISPLAY NAME STATUS\nopenstack Active\noc project openstack\nAlready on project \"openstack\" on server \"https://api.crc.testing:6443\".\nbash scripts/gen-input-kustomize.sh\n~/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ~/src/github.com/openstack-k8s-operators/install_yamls\noc get secret/osp-secret || oc kustomize /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input | oc apply -f -\nsecret/libvirt-secret created\nsecret/octavia-ca-passphrase created\nsecret/osp-secret created\nmake[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "stdout_lines": ["make[1]: Entering directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'", "bash scripts/gen-namespace.sh", "oc apply -f /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/namespace.yaml", "namespace/openstack unchanged", "timeout 500s bash -c \"while ! (oc get project.v1.project.openshift.io openstack); do sleep 1; done\"", "NAME DISPLAY NAME STATUS", "openstack Active", "oc project openstack", "Already on project \"openstack\" on server \"https://api.crc.testing:6443\".", "bash scripts/gen-input-kustomize.sh", "~/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input ~/src/github.com/openstack-k8s-operators/install_yamls", "oc get secret/osp-secret || oc kustomize /home/zuul/src/github.com/openstack-k8s-operators/install_yamls/out/openstack/input | oc apply -f -", "secret/libvirt-secret created", "secret/octavia-ca-passphrase created", "secret/osp-secret created", "make[1]: Leaving directory '/home/zuul/src/github.com/openstack-k8s-operators/install_yamls'"]} TASK [backend_services : execute alternative tasks when source env is ODPdO] *** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [backend_services : set service passwords] ******************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n oc set data secret/osp-secret \"AodhPassword=FE40SwbpalV3bCmjE8C8PjinC\"\n oc set data secret/osp-secret \"BarbicanPassword=eYzr9MBFDmiJB4hH9JpYGrBUs\"\n oc set data secret/osp-secret \"CeilometerPassword=73G2pFjfcIBAoDeVlhO8qoCzS\"\n oc set data secret/osp-secret \"CinderPassword=ZLaOV439P93F4BjobxhcroXOz\"\n oc set data secret/osp-secret \"GlancePassword=aNIzZUnWJXeB7vN5hHCSRraE1\"\n oc set data secret/osp-secret \"IronicPassword=ZT39Alv7wHwQ8qHve67FgxGFx\"\n oc set data secret/osp-secret \"IronicInspectorPassword=ZT39Alv7wHwQ8qHve67FgxGFx\"\n oc set data secret/osp-secret \"NeutronPassword=64Ro7K4F9IeNcP3mN1fhgWKi0\"\n oc set data secret/osp-secret \"NovaPassword=9uQQnx1OMESGlZOvqzpGC8oZo\"\n oc set data secret/osp-secret \"OctaviaPassword=M7MuAlqxfV5sPRHUIK51WpJDo\"\n oc set data secret/osp-secret \"PlacementPassword=d08B4cvlLTJHRqtwNH1Ht6Ilx\"\n oc set data secret/osp-secret \"HeatPassword=Fo0tYo48rsHxn7Wj2RN5dScXN\"\n oc set data secret/osp-secret \"HeatAuthEncryptionKey=jnxb6OcYF4nlD2uDxWIPC3kZMGMY5kVu\"\n oc set data secret/osp-secret \"HeatStackDomainAdminPassword=C7nzrujMcPvyeTlrR1xZWRURR\"\n oc set data secret/osp-secret \"ManilaPassword=RJJX8DHfYb9Voo0aMJNFkej3i\"\n oc set data secret/osp-secret \"SwiftPassword=SejPT2ovcAzGrcd41YOgrfRZw\"\n", "delta": "0:00:02.258023", "end": "2025-10-06 15:00:05.865888", "msg": "", "rc": 0, "start": "2025-10-06 15:00:03.607865", "stderr": "+ 
oc set data secret/osp-secret AodhPassword=FE40SwbpalV3bCmjE8C8PjinC\n+ oc set data secret/osp-secret BarbicanPassword=eYzr9MBFDmiJB4hH9JpYGrBUs\n+ oc set data secret/osp-secret CeilometerPassword=73G2pFjfcIBAoDeVlhO8qoCzS\n+ oc set data secret/osp-secret CinderPassword=ZLaOV439P93F4BjobxhcroXOz\n+ oc set data secret/osp-secret GlancePassword=aNIzZUnWJXeB7vN5hHCSRraE1\n+ oc set data secret/osp-secret IronicPassword=ZT39Alv7wHwQ8qHve67FgxGFx\n+ oc set data secret/osp-secret IronicInspectorPassword=ZT39Alv7wHwQ8qHve67FgxGFx\n+ oc set data secret/osp-secret NeutronPassword=64Ro7K4F9IeNcP3mN1fhgWKi0\n+ oc set data secret/osp-secret NovaPassword=9uQQnx1OMESGlZOvqzpGC8oZo\n+ oc set data secret/osp-secret OctaviaPassword=M7MuAlqxfV5sPRHUIK51WpJDo\n+ oc set data secret/osp-secret PlacementPassword=d08B4cvlLTJHRqtwNH1Ht6Ilx\n+ oc set data secret/osp-secret HeatPassword=Fo0tYo48rsHxn7Wj2RN5dScXN\n+ oc set data secret/osp-secret HeatAuthEncryptionKey=jnxb6OcYF4nlD2uDxWIPC3kZMGMY5kVu\n+ oc set data secret/osp-secret HeatStackDomainAdminPassword=C7nzrujMcPvyeTlrR1xZWRURR\n+ oc set data secret/osp-secret ManilaPassword=RJJX8DHfYb9Voo0aMJNFkej3i\n+ oc set data secret/osp-secret SwiftPassword=SejPT2ovcAzGrcd41YOgrfRZw", "stderr_lines": ["+ oc set data secret/osp-secret AodhPassword=FE40SwbpalV3bCmjE8C8PjinC", "+ oc set data secret/osp-secret BarbicanPassword=eYzr9MBFDmiJB4hH9JpYGrBUs", "+ oc set data secret/osp-secret CeilometerPassword=73G2pFjfcIBAoDeVlhO8qoCzS", "+ oc set data secret/osp-secret CinderPassword=ZLaOV439P93F4BjobxhcroXOz", "+ oc set data secret/osp-secret GlancePassword=aNIzZUnWJXeB7vN5hHCSRraE1", "+ oc set data secret/osp-secret IronicPassword=ZT39Alv7wHwQ8qHve67FgxGFx", "+ oc set data secret/osp-secret IronicInspectorPassword=ZT39Alv7wHwQ8qHve67FgxGFx", "+ oc set data secret/osp-secret NeutronPassword=64Ro7K4F9IeNcP3mN1fhgWKi0", "+ oc set data secret/osp-secret NovaPassword=9uQQnx1OMESGlZOvqzpGC8oZo", "+ oc set data secret/osp-secret OctaviaPassword=M7MuAlqxfV5sPRHUIK51WpJDo", "+ oc set data secret/osp-secret PlacementPassword=d08B4cvlLTJHRqtwNH1Ht6Ilx", "+ oc set data secret/osp-secret HeatPassword=Fo0tYo48rsHxn7Wj2RN5dScXN", "+ oc set data secret/osp-secret HeatAuthEncryptionKey=jnxb6OcYF4nlD2uDxWIPC3kZMGMY5kVu", "+ oc set data secret/osp-secret HeatStackDomainAdminPassword=C7nzrujMcPvyeTlrR1xZWRURR", "+ oc set data secret/osp-secret ManilaPassword=RJJX8DHfYb9Voo0aMJNFkej3i", "+ oc set data secret/osp-secret SwiftPassword=SejPT2ovcAzGrcd41YOgrfRZw"], "stdout": "secret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated\nsecret/osp-secret data updated", "stdout_lines": ["secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", "secret/osp-secret data updated", 
"secret/osp-secret data updated"]} TASK [backend_services : create tmp directory] ********************************* changed: [localhost] => {"changed": true, "cmd": ["mkdir", "-p", "../../tests/config/tmp"], "delta": "0:00:00.006737", "end": "2025-10-06 15:00:06.125507", "msg": "", "rc": 0, "start": "2025-10-06 15:00:06.118770", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []} TASK [backend_services : template out the controlplane deployment] ************* changed: [localhost] => {"changed": true, "checksum": "9622ec52eedf34d330e8c80edc6a81450692a9a6", "dest": "../config/tmp/test_deployment.yaml", "gid": 1000, "group": "zuul", "md5sum": "e535f37907880f5ac608c12cefd5a242", "mode": "0644", "owner": "zuul", "secontext": "unconfined_u:object_r:user_home_t:s0", "size": 3385, "src": "/home/zuul/.ansible/tmp/ansible-tmp-1759762806.198967-39856-267218925146315/source", "state": "file", "uid": 1000} TASK [backend_services : template out the OpenStackVersion deployment with container overrides] *** skipping: [localhost] => {"changed": false, "false_condition": "periodic|default(false)", "skip_reason": "Conditional result was False"} TASK [backend_services : Apply OpenStackVersion with container overrides to environment] *** skipping: [localhost] => {"changed": false, "false_condition": "periodic|default(false)", "skip_reason": "Conditional result was False"} TASK [backend_services : execute alternative tasks when source env is ODPdO] *** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [backend_services : deploy the OpenStackControlPlane CR] ****************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc apply -f ../config/tmp/test_deployment.yaml\n", "delta": "0:00:00.207264", "end": "2025-10-06 15:00:07.360212", "msg": "", "rc": 0, "start": "2025-10-06 15:00:07.152948", "stderr": "+ oc apply -f ../config/tmp/test_deployment.yaml\nWarning: spec.galera.template[openstack].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!\nWarning: spec.galera.template[openstack-cell1].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!", "stderr_lines": ["+ oc apply -f ../config/tmp/test_deployment.yaml", "Warning: spec.galera.template[openstack].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!", "Warning: spec.galera.template[openstack-cell1].storageRequest: 1Gi is not appropriate for production! For production use at least 5G!"], "stdout": "openstackcontrolplane.core.openstack.org/openstack created", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack created"]} TASK [backend_services : verify that MariaDB and RabbitMQ are running, for all defined cells] *** FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (60 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (59 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (58 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (57 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (56 retries left). 
FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (55 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ are running, for all defined cells (54 retries left). changed: [localhost] => {"attempts": 8, "changed": true, "cmd": "set -euxo pipefail\n\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\noc get pod openstack-galera-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\noc get pod rabbitmq-server-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\nfor CELL in $(echo $RENAMED_CELLS); do\n oc get pod openstack-$CELL-galera-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\n oc get pod rabbitmq-$CELL-server-0 -o jsonpath='{.status.phase}{\"\\n\"}' | grep Running\ndone\n", "delta": "0:00:00.567874", "end": "2025-10-06 15:00:46.619116", "msg": "", "rc": 0, "start": "2025-10-06 15:00:46.051242", "stderr": "+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ oc get pod openstack-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running\n+ oc get pod rabbitmq-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running\n++ echo cell1\n+ for CELL in $(echo $RENAMED_CELLS)\n+ oc get pod openstack-cell1-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running\n+ oc get pod rabbitmq-cell1-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'\n+ grep Running", "stderr_lines": ["+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ oc get pod openstack-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running", "+ oc get pod rabbitmq-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running", "++ echo cell1", "+ for CELL in $(echo $RENAMED_CELLS)", "+ oc get pod openstack-cell1-galera-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running", "+ oc get pod rabbitmq-cell1-server-0 -o 'jsonpath={.status.phase}{\"\\n\"}'", "+ grep Running"], "stdout": "Running\nRunning\nRunning\nRunning", "stdout_lines": ["Running", "Running", "Running", "Running"]} TASK [backend_services : verify that MariaDB and RabbitMQ CR's deployed, for all defined cells] *** changed: [localhost] => (item=Galera) => {"ansible_loop_var": "item", "attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\noc get Galera -o json | jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'\n", "delta": "0:00:00.150880", "end": "2025-10-06 15:00:47.030547", "failed_when_result": false, "item": "Galera", "msg": "", "rc": 0, "start": "2025-10-06 15:00:46.879667", "stderr": "+ oc get Galera -o json\n+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'", "stderr_lines": ["+ oc get Galera -o json", "+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'"], "stdout": "true", "stdout_lines": ["true"]} FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ CR's deployed, for all defined cells (60 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ CR's deployed, for all defined cells (59 retries left). FAILED - RETRYING: [localhost]: verify that MariaDB and RabbitMQ CR's deployed, for all defined cells (58 retries left). 
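The follow-up check below inspects the operator custom resources rather than the pods: it pipes oc get Galera / oc get Rabbitmqs through jq and succeeds only once every Ready condition reports "Setup complete". An equivalent per-resource wait, assuming the CR names match the pod prefixes above (openstack and openstack-cell1), could be:

    # Block until each Galera CR publishes a Ready=True condition
    for cr in openstack openstack-cell1; do
        oc wait galera/"$cr" --for=condition=Ready --timeout=300s
    done
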
changed: [localhost] => (item=Rabbitmqs) => {"ansible_loop_var": "item", "attempts": 4, "changed": true, "cmd": "set -euxo pipefail\n\n\noc get Rabbitmqs -o json | jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'\n", "delta": "0:00:00.155044", "end": "2025-10-06 15:01:03.521655", "failed_when_result": false, "item": "Rabbitmqs", "msg": "", "rc": 0, "start": "2025-10-06 15:01:03.366611", "stderr": "+ oc get Rabbitmqs -o json\n+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'", "stderr_lines": ["+ oc get Rabbitmqs -o json", "+ jq -e '[ .items[].status.conditions[] | select(.type == \"Ready\") | .message] | select(length > 0) | all(. == \"Setup complete\")'"], "stdout": "true", "stdout_lines": ["true"]} TASK [backend_services : Patch openstack upstream dns server to set the correct value for the environment] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\ncrname=$(oc get openstackcontrolplane -o name)\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/dns/template/options\", \"value\": [{\"key\": \"server\", \"values\": [\"192.168.122.10\"]}]}]'\n", "delta": "0:00:00.412852", "end": "2025-10-06 15:01:04.210237", "msg": "", "rc": 0, "start": "2025-10-06 15:01:03.797385", "stderr": "++ oc get openstackcontrolplane -o name\n+ crname=openstackcontrolplane.core.openstack.org/openstack\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/dns/template/options\", \"value\": [{\"key\": \"server\", \"values\": [\"192.168.122.10\"]}]}]'", "stderr_lines": ["++ oc get openstackcontrolplane -o name", "+ crname=openstackcontrolplane.core.openstack.org/openstack", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/dns/template/options\", \"value\": [{\"key\": \"server\", \"values\": [\"192.168.122.10\"]}]}]'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [backend_services : Patch rabbitmq resources for lower resource consumption] *** ok: [localhost] => {"changed": false, "cmd": "set -euxo pipefail\n\n\ncrname=$(oc get openstackcontrolplane -o name)\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/cpu\", \"value\": 500m}]'\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/memory\", \"value\": 500Mi}]'\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/cpu\", \"value\": 500m}]'\noc patch ${crname} --type json -p='[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/memory\", \"value\": 500Mi}]'\n", "delta": "0:00:01.270682", "end": "2025-10-06 15:01:05.769016", "msg": "", "rc": 0, "start": "2025-10-06 15:01:04.498334", "stderr": "++ oc get openstackcontrolplane -o name\n+ crname=openstackcontrolplane.core.openstack.org/openstack\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/cpu\", \"value\": 500m}]'\n+ oc patch openstackcontrolplane.core.openstack.org/openstack 
--type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/memory\", \"value\": 500Mi}]'\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/cpu\", \"value\": 500m}]'\n+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/memory\", \"value\": 500Mi}]'", "stderr_lines": ["++ oc get openstackcontrolplane -o name", "+ crname=openstackcontrolplane.core.openstack.org/openstack", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/cpu\", \"value\": 500m}]'", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq/resources/requests/memory\", \"value\": 500Mi}]'", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/cpu\", \"value\": 500m}]'", "+ oc patch openstackcontrolplane.core.openstack.org/openstack --type json '-p=[{\"op\": \"replace\", \"path\": \"/spec/rabbitmq/templates/rabbitmq-cell1/resources/requests/memory\", \"value\": 500Mi}]'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched\nopenstackcontrolplane.core.openstack.org/openstack patched\nopenstackcontrolplane.core.openstack.org/openstack patched\nopenstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched", "openstackcontrolplane.core.openstack.org/openstack patched", "openstackcontrolplane.core.openstack.org/openstack patched", "openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [backend_services : Verify that OpenStackControlPlane is waiting for openstackclient] *** FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (60 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (59 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (58 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (57 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (56 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (55 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (54 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (53 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (52 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (51 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (50 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (49 retries left). 
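Two things are worth noting around this wait. First, the preceding task trimmed the RabbitMQ resource requests to 500m CPU / 500Mi memory for both the top-level and cell1 clusters so the control plane stays schedulable on a single-node CRC host; one way to confirm the patch took effect, reusing the jq tooling this job already relies on and the same template paths used in the patch, might be:

    # Show the patched requests for both RabbitMQ templates
    oc get openstackcontrolplane openstack -o json \
        | jq '.spec.rabbitmq.templates
              | {rabbitmq: .rabbitmq.resources.requests,
                 "rabbitmq-cell1": ."rabbitmq-cell1".resources.requests}'

Second, the long retry loop is expected: the task keeps polling the control plane's Ready condition until its message reports "OpenStackControlPlane Client not started", which is the intermediate state this stage of the adoption waits for.
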
FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (48 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (47 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (46 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (45 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (44 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (43 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (42 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (41 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (40 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (39 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (38 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (37 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (36 retries left). FAILED - RETRYING: [localhost]: Verify that OpenStackControlPlane is waiting for openstackclient (35 retries left). changed: [localhost] => {"attempts": 27, "changed": true, "cmd": "set -euxo pipefail\n\n\n\noc get openstackcontrolplane openstack -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}' | grep 'OpenStackControlPlane Client not started'\n", "delta": "0:00:00.179075", "end": "2025-10-06 15:02:08.881655", "msg": "", "rc": 0, "start": "2025-10-06 15:02:08.702580", "stderr": "+ oc get openstackcontrolplane openstack -o 'jsonpath={.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'\n+ grep 'OpenStackControlPlane Client not started'", "stderr_lines": ["+ oc get openstackcontrolplane openstack -o 'jsonpath={.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'", "+ grep 'OpenStackControlPlane Client not started'"], "stdout": "OpenStackControlPlane Client not started", "stdout_lines": ["OpenStackControlPlane Client not started"]} TASK [execute alternative tasks when source env is OSPdO] ********************** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [get_services_configuration : test connection to the original DB] ********* changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa 
root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nunset PULL_OPENSTACK_CONFIGURATION_DATABASES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\nfor CELL in $(echo $CELLS); do\n PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]=$(oc run mariadb-client-1-$CELL ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- mysql -rsh \"${SOURCE_MARIADB_IP[$CELL]}\" -uroot -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\" -e 'SHOW databases;')\ndone\n", "delta": "0:00:06.868374", "end": "2025-10-06 15:02:16.178887", "msg": "", "rc": 0, "start": "2025-10-06 15:02:09.310513", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ awk -F ': ' '{ print $2; }'\n++ grep ' MysqlRootPassword:'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ unset PULL_OPENSTACK_CONFIGURATION_DATABASES\n+ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ oc run mariadb-client-1-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'SHOW databases;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-1-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-1-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-1-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-1-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\nwarning: couldn't attach to pod/mariadb-client-1-default, falling back to streaming logs: unable to upgrade connection: container mariadb-client-1-default not found in pod mariadb-client-1-default_openstack\n+ PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]='aodh\ncinder\nglance\ngnocchi\nheat\ninformation_schema\nkeystone\nmanila\nmysql\nnova\nnova_api\nnova_cell0\noctavia\noctavia_persistence\novs_neutron\nperformance_schema\nplacement'", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ 
MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ awk -F ': ' '{ print $2; }'", "++ grep ' MysqlRootPassword:'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ unset PULL_OPENSTACK_CONFIGURATION_DATABASES", "+ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ oc run mariadb-client-1-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'SHOW databases;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-1-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-1-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-1-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-1-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "warning: couldn't attach to pod/mariadb-client-1-default, falling back to streaming logs: unable to upgrade connection: container mariadb-client-1-default not found in pod mariadb-client-1-default_openstack", "+ PULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]='aodh", "cinder", "glance", "gnocchi", "heat", "information_schema", "keystone", "manila", "mysql", "nova", "nova_api", "nova_cell0", "octavia", "octavia_persistence", "ovs_neutron", "performance_schema", "placement'"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : run mysqlcheck on the original DB to look for things that are not OK] *** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in 
$(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nunset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\nrun_mysqlcheck() {\n PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK=$(oc run mariadb-client-2-$1 ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\" | grep -v OK)\n}\nfor CELL in $(echo $CELLS); do\n run_mysqlcheck $CELL\ndone\nif [ \"$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\" != \"\" ]; then\n # Try mysql_upgrade to fix mysqlcheck failure\n for CELL in $(echo $CELLS); do\n MYSQL_UPGRADE=$(oc run mariadb-client-3-$CELL ${MARIADB_CLIENT_ANNOTATIONS} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- mysql_upgrade --skip-version-check -v -h ${SOURCE_MARIADB_IP[$CELL]} -u root -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\")\n # rerun mysqlcheck to check if problem is resolved\n run_mysqlcheck\n done\nfi\necho \"$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\"\n", "delta": "0:00:03.765613", "end": "2025-10-06 15:02:20.245878", "failed_when_result": false, "msg": "non-zero return code", "rc": 1, "start": "2025-10-06 15:02:16.480265", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n+ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ run_mysqlcheck default\n++ oc run mariadb-client-2-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysqlcheck --all-databases -h 172.17.0.100 -u root -pCjEVN5fsDI\n++ grep -v OK\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-2-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-2-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-2-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-2-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or 
\"Localhost\")\n+ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK=", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "+ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ run_mysqlcheck default", "++ oc run mariadb-client-2-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysqlcheck --all-databases -h 172.17.0.100 -u root -pCjEVN5fsDI", "++ grep -v OK", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-2-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-2-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-2-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-2-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "+ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK="], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : get Nova cells mappings from database] ****** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A 
SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nexport PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=$(oc run mariadb-client-1 ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- mysql -rsh \"${SOURCE_MARIADB_IP[default]}\" -uroot -p\"${SOURCE_DB_ROOT_PASSWORD[default]}\" nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;')\n", "delta": "0:00:03.662672", "end": "2025-10-06 15:02:24.240055", "msg": "", "rc": 0, "start": "2025-10-06 15:02:20.577383", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n++ oc run mariadb-client-1 --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-1\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-1\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-1\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-1\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\nwarning: couldn't attach to pod/mariadb-client-1, falling back to streaming logs: unable to upgrade connection: container mariadb-client-1 not found in pod mariadb-client-1_openstack\n+ export 'PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ 
PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "++ oc run mariadb-client-1 --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-1\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-1\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-1\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-1\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "warning: couldn't attach to pod/mariadb-client-1, falling back to streaming logs: unable to upgrade connection: container mariadb-client-1 not found in pod mariadb-client-1_openstack", "+ export 'PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ 
PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : get the host names of the registered Nova compute services] *** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nunset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\nfor CELL in $(echo $CELLS); do\n PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=$(oc run mariadb-client-4-$CELL ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- mysql -rsh \"${SOURCE_MARIADB_IP[$CELL]}\" -uroot -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\" -e \"select host from nova.services where services.binary='nova-compute' and deleted=0;\")\ndone\n", "delta": "0:00:03.727829", "end": "2025-10-06 15:02:28.271766", "msg": "", "rc": 0, "start": "2025-10-06 15:02:24.543937", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ unset 
PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n+ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ oc run mariadb-client-4-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-4-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-4-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-4-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-4-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\n+ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=standalone.ooo.test", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "+ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ oc run mariadb-client-4-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-4-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-4-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-4-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-4-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "+ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=standalone.ooo.test"], "stdout": "", "stdout_lines": []} 
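The get_services_configuration tasks above all follow the same pattern: spawn a throwaway mariadb-client pod attached to the internalapi network with `oc run`, point the MySQL client at the source Galera VIP, and capture the result in an exported variable. Below is a minimal sketch of that pattern, condensed from the commands captured in this log; the password file path, the 172.17.0.100 VIP and the image tag are simply the values this particular job used, and the pod names are illustrative, not the ones in the trace.

    # Root password for the source DB, read from the TripleO passwords file
    SOURCE_DB_ROOT_PASSWORD=$(grep ' MysqlRootPassword:' ~/overcloud-passwords.yaml | awk -F ': ' '{ print $2; }')
    SOURCE_MARIADB_IP=172.17.0.100
    MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified
    MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi

    # List the databases on the source cloud (same call as the mariadb-client-*-default pods above)
    oc run mariadb-client ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \
        mysql -rsh ${SOURCE_MARIADB_IP} -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" -e 'SHOW databases;'

    # Consistency check; anything not reported as OK is kept for later comparison
    oc run mariadb-client-check ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \
        mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP} -u root -p"${SOURCE_DB_ROOT_PASSWORD}" | grep -v OK

    # Nova cell mappings from nova_api and registered nova-compute hosts from nova
    oc run mariadb-client-cells ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \
        mysql -rsh ${SOURCE_MARIADB_IP} -uroot -p"${SOURCE_DB_ROOT_PASSWORD}" nova_api \
        -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'

The PodSecurity "restricted:latest" warnings and the "couldn't attach ... falling back to streaming logs" messages around each client pod come from the cluster admission controller and from oc; in this run they did not prevent the queries from returning data, as the populated variables in the trace above show.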
TASK [get_services_configuration : get the list of mapped Nova cells] ********** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\nexport PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=$($CONTROLLER1_SSH sudo podman exec -it nova_api nova-manage cell_v2 list_cells)\n", "delta": "0:00:03.194272", "end": "2025-10-06 15:02:31.725975", "msg": "", "rc": 0, "start": "2025-10-06 15:02:28.531703", "stderr": "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_api nova-manage cell_v2 list_cells\n+ export 'PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| Name | UUID | Transport URL | Database Connection | Disabled |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r'\n+ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| Name | UUID | Transport URL | Database Connection | Disabled |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False 
|\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r'", "stderr_lines": ["+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_api nova-manage cell_v2 list_cells", "+ export 'PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| Name | UUID | Transport URL | Database Connection | Disabled |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "'", "+ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| Name | UUID | Transport URL | Database Connection | Disabled |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "'"], "stdout": "", "stdout_lines": []} TASK [get_services_configuration : store exported variables for future use] **** changed: [localhost] => {"changed": true, "cmd": "\nset 
-euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nfor CELL in $(echo $CELLS); do\n RCELL=$CELL\n [ \"$CELL\" = \"$DEFAULT_CELL_NAME\" ] && RCELL=default\n cat > ~/.source_cloud_exported_variables_$CELL << EOF\nunset PULL_OPENSTACK_CONFIGURATION_DATABASES\nunset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\nunset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\ndeclare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\nPULL_OPENSTACK_CONFIGURATION_DATABASES[$CELL]=\"$(oc run mariadb-client-5-$CELL ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \\\n mysql -rsh ${SOURCE_MARIADB_IP[$RCELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} -e 'SHOW databases;')\"\nPULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[$CELL]=\"$(oc run mariadb-client-6-$CELL ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \\\n mysqlcheck --all-databases -h ${SOURCE_MARIADB_IP[$RCELL]} -u root -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} | grep -v OK)\"\nPULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[$CELL]=\"$(oc run mariadb-client-7-$CELL ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \\\n mysql -rsh ${SOURCE_MARIADB_IP[$RCELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} -e \\\n \"select host from nova.services where services.binary='nova-compute' and deleted=0;\")\"\nif [ \"$RCELL\" = \"default\" ]; then\n PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS=\"$(oc run mariadb-client-2 ${MARIADB_RUN_OVERRIDES} -q --image ${MARIADB_IMAGE} -i --rm --restart=Never -- \\\n mysql -rsh ${SOURCE_MARIADB_IP[$RCELL]} -uroot -p${SOURCE_DB_ROOT_PASSWORD[$RCELL]} nova_api -e \\\n 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;')\"\n PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS=\"$($CONTROLLER1_SSH sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells)\"\nfi\nEOF\ndone\nchmod 0600 ~/.source_cloud_exported_variables*\n", "delta": "0:00:16.031188", "end": "2025-10-06 15:02:48.050158", "msg": "", "rc": 0, "start": "2025-10-06 15:02:32.018970", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ 
TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ RCELL=default\n+ '[' default = cell1 ']'\n+ cat\n++ oc run mariadb-client-5-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'SHOW databases;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-5-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-5-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-5-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-5-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\n++ grep -v OK\n++ oc run mariadb-client-6-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysqlcheck --all-databases -h 172.17.0.100 -u root -pCjEVN5fsDI\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-6-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-6-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-6-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-6-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\nwarning: couldn't attach to pod/mariadb-client-6-default, falling back to streaming logs: unable to upgrade connection: container mariadb-client-6-default not found in pod mariadb-client-6-default_openstack\n++ oc run mariadb-client-7-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-7-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-7-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-7-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or 
container \"mariadb-client-7-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\n++ oc run mariadb-client-2 --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-2\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-2\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-2\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-2\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\n++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells\n+ chmod 0600 /home/zuul/.source_cloud_exported_variables_default", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ RCELL=default", "+ '[' default = cell1 ']'", "+ cat", "++ oc run mariadb-client-5-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'SHOW databases;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-5-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-5-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-5-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-5-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "++ grep -v OK", "++ oc run mariadb-client-6-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysqlcheck --all-databases -h 
172.17.0.100 -u root -pCjEVN5fsDI", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-6-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-6-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-6-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-6-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "warning: couldn't attach to pod/mariadb-client-6-default, falling back to streaming logs: unable to upgrade connection: container mariadb-client-6-default not found in pod mariadb-client-6-default_openstack", "++ oc run mariadb-client-7-default --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI -e 'select host from nova.services where services.binary='\\''nova-compute'\\'' and deleted=0;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-7-default\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-7-default\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-7-default\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-7-default\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "++ oc run mariadb-client-2 --annotations=k8s.v1.cni.cncf.io/networks=internalapi -q --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 172.17.0.100 -uroot -pCjEVN5fsDI nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client-2\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client-2\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client-2\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client-2\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo podman exec -it nova_conductor nova-manage cell_v2 list_cells", "+ chmod 0600 /home/zuul/.source_cloud_exported_variables_default"], "stdout": "", "stdout_lines": []} TASK [stop_openstack_services : set shell vars for stopping openstack services] *** ok: [localhost] => {"ansible_facts": {"stop_openstack_services_shell_vars": "CONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n"}, "changed": false} TASK [stop_openstack_services : Remove colocation constraints between manila-share and ceph-nfs] *** skipping: [localhost] => {"changed": false, "false_condition": "manila_backend | default(\"\") == \"cephnfs\"", "skip_reason": "Conditional result was False"} TASK [stop_openstack_services : stop control plane services] ******************* changed: [localhost] => {"changed": true, "cmd": "set -euxo 
pipefail\n\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\nServicesToStop=(\"tripleo_aodh_api.service\"\n \"tripleo_aodh_api_cron.service\"\n \"tripleo_aodh_evaluator.service\"\n \"tripleo_aodh_listener.service\"\n \"tripleo_aodh_notifier.service\"\n \"tripleo_ceilometer_agent_central.service\"\n \"tripleo_ceilometer_agent_notification.service\"\n \"tripleo_octavia_api.service\"\n \"tripleo_octavia_health_manager.service\"\n \"tripleo_octavia_rsyslog.service\"\n \"tripleo_octavia_driver_agent.service\"\n \"tripleo_octavia_housekeeping.service\"\n \"tripleo_octavia_worker.service\"\n \"tripleo_horizon.service\"\n \"tripleo_keystone.service\"\n \"tripleo_barbican_api.service\"\n \"tripleo_barbican_worker.service\"\n \"tripleo_barbican_keystone_listener.service\"\n \"tripleo_cinder_api.service\"\n \"tripleo_cinder_api_cron.service\"\n \"tripleo_cinder_scheduler.service\"\n \"tripleo_cinder_volume.service\"\n \"tripleo_cinder_backup.service\"\n \"tripleo_collectd.service\"\n \"tripleo_glance_api.service\"\n \"tripleo_gnocchi_api.service\"\n \"tripleo_gnocchi_metricd.service\"\n \"tripleo_gnocchi_statsd.service\"\n \"tripleo_manila_api.service\"\n \"tripleo_manila_api_cron.service\"\n \"tripleo_manila_scheduler.service\"\n \"tripleo_neutron_api.service\"\n \"tripleo_placement_api.service\"\n \"tripleo_nova_api_cron.service\"\n \"tripleo_nova_api.service\"\n \"tripleo_nova_conductor.service\"\n \"tripleo_nova_metadata.service\"\n \"tripleo_nova_scheduler.service\"\n \"tripleo_nova_vnc_proxy.service\"\n \"tripleo_aodh_api.service\"\n \"tripleo_aodh_api_cron.service\"\n \"tripleo_aodh_evaluator.service\"\n \"tripleo_aodh_listener.service\"\n \"tripleo_aodh_notifier.service\"\n \"tripleo_ceilometer_agent_central.service\"\n \"tripleo_ceilometer_agent_compute.service\"\n \"tripleo_ceilometer_agent_ipmi.service\"\n \"tripleo_ceilometer_agent_notification.service\"\n \"tripleo_ovn_cluster_northd.service\"\n \"tripleo_ironic_neutron_agent.service\"\n \"tripleo_ironic_api.service\"\n \"tripleo_ironic_inspector.service\"\n \"tripleo_ironic_conductor.service\"\n \"tripleo_ironic_inspector_dnsmasq.service\"\n \"tripleo_ironic_pxe_http.service\"\n \"tripleo_ironic_pxe_tftp.service\")\n\nPacemakerResourcesToStop=(\"openstack-cinder-volume\"\n \"openstack-cinder-backup\"\n \"openstack-manila-share\")\n\necho \"Stopping systemd OpenStack services\"\nfor service in ${ServicesToStop[*]}; do\n for i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n echo \"Stopping the $service in controller $i\"\n if ${!SSH_CMD} sudo systemctl is-active $service; then\n ${!SSH_CMD} sudo systemctl stop $service\n fi\n fi\n done\ndone\n\necho \"Checking systemd OpenStack services\"\nfor service in ${ServicesToStop[*]}; do\n for i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n if ! ${!SSH_CMD} systemctl show $service | grep ActiveState=inactive >/dev/null; then\n echo \"ERROR: Service $service still running on controller $i\"\n else\n echo \"OK: Service $service is not running on controller $i\"\n fi\n fi\n done\ndone\n\necho \"Stopping pacemaker OpenStack services\"\nfor i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! 
-z \"${!SSH_CMD}\" ]; then\n echo \"Using controller $i to run pacemaker commands\"\n for resource in ${PacemakerResourcesToStop[*]}; do\n if ${!SSH_CMD} sudo pcs resource config $resource &>/dev/null; then\n echo \"Stopping $resource\"\n ${!SSH_CMD} sudo pcs resource disable $resource\n else\n echo \"Service $resource not present\"\n fi\n done\n break\n fi\ndone\n\necho \"Checking pacemaker OpenStack services\"\nfor i in {1..3}; do\n SSH_CMD=CONTROLLER${i}_SSH\n if [ ! -z \"${!SSH_CMD}\" ]; then\n echo \"Using controller $i to run pacemaker commands\"\n for resource in ${PacemakerResourcesToStop[*]}; do\n if ${!SSH_CMD} sudo pcs resource config $resource &>/dev/null; then\n if ! ${!SSH_CMD} sudo pcs resource status $resource | grep Started; then\n echo \"OK: Service $resource is stopped\"\n else\n echo \"ERROR: Service $resource is started\"\n fi\n fi\n done\n break\n fi\ndone\n", "delta": "0:03:01.070693", "end": "2025-10-06 15:05:49.446477", "msg": "", "rc": 0, "start": "2025-10-06 15:02:48.375784", "stderr": "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ ServicesToStop=(\"tripleo_aodh_api.service\" \"tripleo_aodh_api_cron.service\" \"tripleo_aodh_evaluator.service\" \"tripleo_aodh_listener.service\" \"tripleo_aodh_notifier.service\" \"tripleo_ceilometer_agent_central.service\" \"tripleo_ceilometer_agent_notification.service\" \"tripleo_octavia_api.service\" \"tripleo_octavia_health_manager.service\" \"tripleo_octavia_rsyslog.service\" \"tripleo_octavia_driver_agent.service\" \"tripleo_octavia_housekeeping.service\" \"tripleo_octavia_worker.service\" \"tripleo_horizon.service\" \"tripleo_keystone.service\" \"tripleo_barbican_api.service\" \"tripleo_barbican_worker.service\" \"tripleo_barbican_keystone_listener.service\" \"tripleo_cinder_api.service\" \"tripleo_cinder_api_cron.service\" \"tripleo_cinder_scheduler.service\" \"tripleo_cinder_volume.service\" \"tripleo_cinder_backup.service\" \"tripleo_collectd.service\" \"tripleo_glance_api.service\" \"tripleo_gnocchi_api.service\" \"tripleo_gnocchi_metricd.service\" \"tripleo_gnocchi_statsd.service\" \"tripleo_manila_api.service\" \"tripleo_manila_api_cron.service\" \"tripleo_manila_scheduler.service\" \"tripleo_neutron_api.service\" \"tripleo_placement_api.service\" \"tripleo_nova_api_cron.service\" \"tripleo_nova_api.service\" \"tripleo_nova_conductor.service\" \"tripleo_nova_metadata.service\" \"tripleo_nova_scheduler.service\" \"tripleo_nova_vnc_proxy.service\" \"tripleo_aodh_api.service\" \"tripleo_aodh_api_cron.service\" \"tripleo_aodh_evaluator.service\" \"tripleo_aodh_listener.service\" \"tripleo_aodh_notifier.service\" \"tripleo_ceilometer_agent_central.service\" \"tripleo_ceilometer_agent_compute.service\" \"tripleo_ceilometer_agent_ipmi.service\" \"tripleo_ceilometer_agent_notification.service\" \"tripleo_ovn_cluster_northd.service\" \"tripleo_ironic_neutron_agent.service\" \"tripleo_ironic_api.service\" \"tripleo_ironic_inspector.service\" \"tripleo_ironic_conductor.service\" \"tripleo_ironic_inspector_dnsmasq.service\" \"tripleo_ironic_pxe_http.service\" \"tripleo_ironic_pxe_tftp.service\")\n+ PacemakerResourcesToStop=(\"openstack-cinder-volume\" \"openstack-cinder-backup\" \"openstack-manila-share\")\n+ echo 'Stopping systemd OpenStack services'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_api.service\n+ : sudo systemctl stop tripleo_aodh_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_api.service\n+ : sudo systemctl stop tripleo_aodh_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_api_cron.service\n+ : sudo systemctl stop tripleo_aodh_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_api_cron.service\n+ : sudo systemctl stop tripleo_aodh_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_evaluator.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_evaluator.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_evaluator.service\n+ : sudo systemctl stop tripleo_aodh_evaluator.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_evaluator.service\n+ : sudo systemctl stop tripleo_aodh_evaluator.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_listener.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_listener.service\n+ : sudo systemctl stop tripleo_aodh_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_listener.service\n+ : sudo systemctl stop tripleo_aodh_listener.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_notifier.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_notifier.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_notifier.service\n+ : sudo systemctl stop tripleo_aodh_notifier.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_notifier.service\n+ : sudo systemctl stop tripleo_aodh_notifier.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_api.service\n+ : sudo systemctl stop tripleo_octavia_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_api.service\n+ : sudo systemctl stop tripleo_octavia_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_health_manager.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_health_manager.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_health_manager.service\n+ : sudo systemctl stop tripleo_octavia_health_manager.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_health_manager.service\n+ : sudo systemctl stop tripleo_octavia_health_manager.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_rsyslog.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_rsyslog.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_rsyslog.service\n+ : sudo systemctl stop tripleo_octavia_rsyslog.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_rsyslog.service\n+ : sudo systemctl stop tripleo_octavia_rsyslog.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_driver_agent.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_driver_agent.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_driver_agent.service\n+ : sudo systemctl stop tripleo_octavia_driver_agent.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_driver_agent.service\n+ : sudo systemctl stop tripleo_octavia_driver_agent.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_housekeeping.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_housekeeping.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_housekeeping.service\n+ : sudo systemctl stop tripleo_octavia_housekeeping.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_housekeeping.service\n+ : sudo systemctl stop tripleo_octavia_housekeeping.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_octavia_worker.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_worker.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_worker.service in controller 2'\n+ : sudo systemctl is-active tripleo_octavia_worker.service\n+ : sudo systemctl stop tripleo_octavia_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_octavia_worker.service in controller 3'\n+ : sudo systemctl is-active tripleo_octavia_worker.service\n+ : sudo systemctl stop tripleo_octavia_worker.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_horizon.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_horizon.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_horizon.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_horizon.service in controller 2'\n+ : sudo systemctl is-active tripleo_horizon.service\n+ : sudo systemctl stop tripleo_horizon.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_horizon.service in controller 3'\n+ : sudo systemctl is-active tripleo_horizon.service\n+ : sudo systemctl stop tripleo_horizon.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_keystone.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_keystone.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_keystone.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_keystone.service in controller 2'\n+ : sudo systemctl is-active tripleo_keystone.service\n+ : sudo systemctl stop tripleo_keystone.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_keystone.service in controller 3'\n+ : sudo systemctl is-active tripleo_keystone.service\n+ : sudo systemctl stop tripleo_keystone.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_barbican_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_barbican_api.service\n+ : sudo systemctl stop tripleo_barbican_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_barbican_api.service\n+ : sudo systemctl stop tripleo_barbican_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_barbican_worker.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_worker.service in controller 2'\n+ : sudo systemctl is-active tripleo_barbican_worker.service\n+ : sudo systemctl stop tripleo_barbican_worker.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_worker.service in controller 3'\n+ : sudo systemctl is-active tripleo_barbican_worker.service\n+ : sudo systemctl stop tripleo_barbican_worker.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_keystone_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 2'\n+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service\n+ : sudo systemctl stop tripleo_barbican_keystone_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 3'\n+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service\n+ : sudo systemctl stop tripleo_barbican_keystone_listener.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_api.service\n+ : sudo systemctl stop tripleo_cinder_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_api.service\n+ : sudo systemctl stop tripleo_cinder_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_api_cron.service\n+ : sudo systemctl stop tripleo_cinder_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_api_cron.service\n+ : sudo systemctl stop tripleo_cinder_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_scheduler.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_scheduler.service\n+ : sudo systemctl stop tripleo_cinder_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_scheduler.service\n+ : sudo systemctl stop tripleo_cinder_scheduler.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_volume.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_volume.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_volume.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_volume.service\n+ : sudo systemctl stop tripleo_cinder_volume.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_cinder_volume.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_volume.service\n+ : sudo systemctl stop tripleo_cinder_volume.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_cinder_backup.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_backup.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_backup.service in controller 2'\n+ : sudo systemctl is-active tripleo_cinder_backup.service\n+ : sudo systemctl stop tripleo_cinder_backup.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_cinder_backup.service in controller 3'\n+ : sudo systemctl is-active tripleo_cinder_backup.service\n+ : sudo systemctl stop tripleo_cinder_backup.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_collectd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_collectd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_collectd.service in controller 2'\n+ : sudo systemctl is-active tripleo_collectd.service\n+ : sudo systemctl stop tripleo_collectd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_collectd.service in controller 3'\n+ : sudo systemctl is-active tripleo_collectd.service\n+ : sudo systemctl stop tripleo_collectd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_glance_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_glance_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_glance_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_glance_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_glance_api.service\n+ : sudo systemctl stop tripleo_glance_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_glance_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_glance_api.service\n+ : sudo systemctl stop tripleo_glance_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_gnocchi_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_gnocchi_api.service\n+ : sudo systemctl stop tripleo_gnocchi_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_gnocchi_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_gnocchi_api.service\n+ : sudo systemctl stop tripleo_gnocchi_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_metricd.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_metricd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 2'\n+ : sudo systemctl is-active tripleo_gnocchi_metricd.service\n+ : sudo systemctl stop tripleo_gnocchi_metricd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 3'\n+ : sudo systemctl is-active tripleo_gnocchi_metricd.service\n+ : sudo systemctl stop tripleo_gnocchi_metricd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_statsd.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_statsd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 2'\n+ : sudo systemctl is-active tripleo_gnocchi_statsd.service\n+ : sudo systemctl stop tripleo_gnocchi_statsd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 3'\n+ : sudo systemctl is-active tripleo_gnocchi_statsd.service\n+ : sudo systemctl stop tripleo_gnocchi_statsd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_manila_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_manila_api.service\n+ : sudo systemctl stop tripleo_manila_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_manila_api.service\n+ : sudo systemctl stop tripleo_manila_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_manila_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_manila_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_manila_api_cron.service\n+ : sudo systemctl stop tripleo_manila_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_manila_api_cron.service\n+ : sudo systemctl stop tripleo_manila_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_manila_scheduler.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_scheduler.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_scheduler.service in controller 2'\n+ : sudo systemctl is-active tripleo_manila_scheduler.service\n+ : sudo systemctl stop tripleo_manila_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_manila_scheduler.service in controller 3'\n+ : sudo systemctl is-active tripleo_manila_scheduler.service\n+ : sudo systemctl stop tripleo_manila_scheduler.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_neutron_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_neutron_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_neutron_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_neutron_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_neutron_api.service\n+ : sudo systemctl stop tripleo_neutron_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_neutron_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_neutron_api.service\n+ : sudo systemctl stop tripleo_neutron_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_placement_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_placement_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_placement_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_placement_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_placement_api.service\n+ : sudo systemctl stop tripleo_placement_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_placement_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_placement_api.service\n+ : sudo systemctl stop tripleo_placement_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api_cron.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_api_cron.service\n+ : sudo systemctl stop tripleo_nova_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_api_cron.service\n+ : sudo systemctl stop tripleo_nova_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_api.service\n+ : sudo systemctl stop tripleo_nova_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_api.service\n+ : sudo systemctl stop tripleo_nova_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_conductor.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_conductor.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_conductor.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_conductor.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_conductor.service\n+ : sudo systemctl stop tripleo_nova_conductor.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_conductor.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_conductor.service\n+ : sudo systemctl stop tripleo_nova_conductor.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_metadata.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_metadata.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_metadata.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_metadata.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_metadata.service\n+ : sudo systemctl stop tripleo_nova_metadata.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_nova_metadata.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_metadata.service\n+ : sudo systemctl stop tripleo_nova_metadata.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_scheduler.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_scheduler.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_scheduler.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_scheduler.service\n+ : sudo systemctl stop tripleo_nova_scheduler.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_scheduler.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_scheduler.service\n+ : sudo systemctl stop tripleo_nova_scheduler.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_nova_vnc_proxy.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_vnc_proxy.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_vnc_proxy.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_vnc_proxy.service in controller 2'\n+ : sudo systemctl is-active tripleo_nova_vnc_proxy.service\n+ : sudo systemctl stop tripleo_nova_vnc_proxy.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_nova_vnc_proxy.service in controller 3'\n+ : sudo systemctl is-active tripleo_nova_vnc_proxy.service\n+ : sudo systemctl stop tripleo_nova_vnc_proxy.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_api.service\n+ : sudo systemctl stop tripleo_aodh_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_api.service\n+ : sudo systemctl stop tripleo_aodh_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_api_cron.service\n+ : sudo systemctl stop tripleo_aodh_api_cron.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_api_cron.service\n+ : sudo systemctl stop tripleo_aodh_api_cron.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_evaluator.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_evaluator.service\n+ : sudo systemctl stop tripleo_aodh_evaluator.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_evaluator.service\n+ : sudo systemctl stop tripleo_aodh_evaluator.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_listener.service\n+ : sudo systemctl stop tripleo_aodh_listener.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_listener.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_listener.service\n+ : sudo systemctl stop tripleo_aodh_listener.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_notifier.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 2'\n+ : sudo systemctl is-active tripleo_aodh_notifier.service\n+ : sudo systemctl stop tripleo_aodh_notifier.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_aodh_notifier.service in controller 3'\n+ : sudo systemctl is-active tripleo_aodh_notifier.service\n+ : sudo systemctl stop tripleo_aodh_notifier.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_central.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_compute.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_compute.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_compute.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_compute.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_compute.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_compute.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_compute.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_compute.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_compute.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_ipmi.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_ipmi.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_ipmi.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_ipmi.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_ipmi.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_ipmi.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_ipmi.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_ipmi.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 2'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 3'\n+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service\n+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ovn_cluster_northd.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ovn_cluster_northd.service\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ovn_cluster_northd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ovn_cluster_northd.service in controller 2'\n+ : sudo systemctl is-active tripleo_ovn_cluster_northd.service\n+ : sudo systemctl stop tripleo_ovn_cluster_northd.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ovn_cluster_northd.service in controller 3'\n+ : sudo systemctl is-active tripleo_ovn_cluster_northd.service\n+ : sudo systemctl stop tripleo_ovn_cluster_northd.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_neutron_agent.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_neutron_agent.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_neutron_agent.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_neutron_agent.service\n+ : sudo systemctl stop tripleo_ironic_neutron_agent.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_neutron_agent.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_neutron_agent.service\n+ : sudo systemctl stop tripleo_ironic_neutron_agent.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_api.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_api.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_api.service\n+ : sudo systemctl stop tripleo_ironic_api.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_api.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_api.service\n+ : sudo systemctl stop tripleo_ironic_api.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_inspector.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_inspector.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_inspector.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_inspector.service\n+ : sudo systemctl stop tripleo_ironic_inspector.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_inspector.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_inspector.service\n+ : sudo systemctl stop tripleo_ironic_inspector.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_conductor.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_conductor.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_conductor.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_conductor.service\n+ : sudo systemctl stop tripleo_ironic_conductor.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_conductor.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_conductor.service\n+ : sudo systemctl stop tripleo_ironic_conductor.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_inspector_dnsmasq.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_inspector_dnsmasq.service\n+ : sudo systemctl stop tripleo_ironic_inspector_dnsmasq.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_inspector_dnsmasq.service\n+ : sudo systemctl stop tripleo_ironic_inspector_dnsmasq.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_pxe_http.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_pxe_http.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_pxe_http.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_pxe_http.service\n+ : sudo systemctl stop tripleo_ironic_pxe_http.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_pxe_http.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_pxe_http.service\n+ : sudo systemctl stop tripleo_ironic_pxe_http.service\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Stopping the tripleo_ironic_pxe_tftp.service in controller 1'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_pxe_tftp.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_pxe_tftp.service in controller 2'\n+ : sudo systemctl is-active tripleo_ironic_pxe_tftp.service\n+ : sudo systemctl stop tripleo_ironic_pxe_tftp.service\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ echo 'Stopping the tripleo_ironic_pxe_tftp.service in controller 3'\n+ : sudo systemctl is-active tripleo_ironic_pxe_tftp.service\n+ : sudo systemctl stop tripleo_ironic_pxe_tftp.service\n+ echo 'Checking systemd OpenStack services'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_api_cron.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ grep ActiveState=inactive\n+ : systemctl show tripleo_aodh_api_cron.service\n+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_evaluator.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_evaluator.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_evaluator.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_evaluator.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_listener.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_listener.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_listener.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_listener.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_notifier.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_notifier.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_notifier.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_notifier.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_central.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ceilometer_agent_central.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_central.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_central.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_notification.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_notification.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_notification.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_octavia_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_health_manager.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_octavia_health_manager.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_health_manager.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_health_manager.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_health_manager.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_health_manager.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_rsyslog.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_octavia_rsyslog.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_rsyslog.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_rsyslog.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_rsyslog.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_rsyslog.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_driver_agent.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_octavia_driver_agent.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ grep ActiveState=inactive\n+ : systemctl show tripleo_octavia_driver_agent.service\n+ echo 'ERROR: Service tripleo_octavia_driver_agent.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_driver_agent.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_driver_agent.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_housekeeping.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_octavia_housekeeping.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_housekeeping.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_housekeeping.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_housekeeping.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_housekeeping.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_worker.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_octavia_worker.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_octavia_worker.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_octavia_worker.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ grep ActiveState=inactive\n+ : systemctl show tripleo_octavia_worker.service\n+ echo 'ERROR: Service tripleo_octavia_worker.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_horizon.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_horizon.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_horizon.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_horizon.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_horizon.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_horizon.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_keystone.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_keystone.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_keystone.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_keystone.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_keystone.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_keystone.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_barbican_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_barbican_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_barbican_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_barbican_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_barbican_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_barbican_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_barbican_worker.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_barbican_worker.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_barbican_worker.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_barbican_worker.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_barbican_worker.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_barbican_worker.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_barbican_keystone_listener.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_barbican_keystone_listener.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_barbican_keystone_listener.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_barbican_keystone_listener.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_barbican_keystone_listener.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_barbican_keystone_listener.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_cinder_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_cinder_api_cron.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_api_cron.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_api_cron.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_cinder_scheduler.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_scheduler.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_scheduler.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_volume.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_cinder_volume.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_volume.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_volume.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_volume.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_volume.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_backup.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_cinder_backup.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_backup.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_backup.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_cinder_backup.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_cinder_backup.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ grep ActiveState=inactive\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_collectd.service\n+ echo 'OK: Service tripleo_collectd.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_collectd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_collectd.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_collectd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_collectd.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_glance_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_glance_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_glance_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_glance_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_glance_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_glance_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_gnocchi_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_gnocchi_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_gnocchi_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_gnocchi_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_gnocchi_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_gnocchi_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_gnocchi_metricd.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_gnocchi_metricd.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_gnocchi_metricd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_gnocchi_metricd.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_gnocchi_metricd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_gnocchi_metricd.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_gnocchi_statsd.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_gnocchi_statsd.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_gnocchi_statsd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_gnocchi_statsd.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_gnocchi_statsd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_gnocchi_statsd.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_manila_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_manila_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_manila_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_manila_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_manila_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_manila_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_manila_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_manila_api_cron.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_manila_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_manila_api_cron.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_manila_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_manila_api_cron.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_manila_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_manila_scheduler.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_manila_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_manila_scheduler.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_manila_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_manila_scheduler.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_neutron_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_neutron_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_neutron_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_neutron_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_neutron_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_neutron_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_placement_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_placement_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_placement_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_placement_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ grep ActiveState=inactive\n+ : systemctl show tripleo_placement_api.service\n+ echo 'ERROR: Service tripleo_placement_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_nova_api_cron.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_api_cron.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_api_cron.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_nova_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_conductor.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_nova_conductor.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_conductor.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_conductor.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_conductor.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_conductor.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_metadata.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_nova_metadata.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_metadata.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_metadata.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_metadata.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_metadata.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_nova_scheduler.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_scheduler.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_scheduler.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_scheduler.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_vnc_proxy.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_nova_vnc_proxy.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_vnc_proxy.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_vnc_proxy.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_nova_vnc_proxy.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_nova_vnc_proxy.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_api_cron.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_api_cron.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_evaluator.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_evaluator.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_evaluator.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_evaluator.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_listener.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_listener.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_listener.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_listener.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_notifier.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_aodh_notifier.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_notifier.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_aodh_notifier.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_central.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ceilometer_agent_central.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_central.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_central.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_compute.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ceilometer_agent_compute.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_compute.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_compute.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_ipmi.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ceilometer_agent_ipmi.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_ipmi.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_ipmi.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_notification.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ceilometer_agent_notification.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ : systemctl show tripleo_ceilometer_agent_notification.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ovn_cluster_northd.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ovn_cluster_northd.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ovn_cluster_northd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ovn_cluster_northd.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ovn_cluster_northd.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ovn_cluster_northd.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_neutron_agent.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_neutron_agent.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ grep ActiveState=inactive\n+ : systemctl show tripleo_ironic_neutron_agent.service\n+ echo 'ERROR: Service tripleo_ironic_neutron_agent.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_neutron_agent.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_neutron_agent.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_api.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_api.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_api.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_api.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_api.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_inspector.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_inspector.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_inspector.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_inspector.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ grep ActiveState=inactive\n+ : systemctl show tripleo_ironic_inspector.service\n+ echo 'ERROR: Service tripleo_ironic_inspector.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_conductor.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_conductor.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_conductor.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_conductor.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_conductor.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_conductor.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_inspector_dnsmasq.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_inspector_dnsmasq.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_inspector_dnsmasq.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_inspector_dnsmasq.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_pxe_http.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_pxe_http.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_pxe_http.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_pxe_http.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_pxe_http.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_pxe_http.service still running on controller 3'\n+ for service in ${ServicesToStop[*]}\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_pxe_tftp.service\n+ grep ActiveState=inactive\n+ echo 'OK: Service tripleo_ironic_pxe_tftp.service is not running on controller 1'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER2_SSH\n+ '[' '!' -z : ']'\n+ : systemctl show tripleo_ironic_pxe_tftp.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 2'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER3_SSH\n+ '[' '!' 
-z : ']'\n+ : systemctl show tripleo_ironic_pxe_tftp.service\n+ grep ActiveState=inactive\n+ echo 'ERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 3'\n+ echo 'Stopping pacemaker OpenStack services'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Using controller 1 to run pacemaker commands'\n+ for resource in ${PacemakerResourcesToStop[*]}\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-volume\n+ echo 'Stopping openstack-cinder-volume'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource disable openstack-cinder-volume\n+ for resource in ${PacemakerResourcesToStop[*]}\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-backup\n+ echo 'Stopping openstack-cinder-backup'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource disable openstack-cinder-backup\n+ for resource in ${PacemakerResourcesToStop[*]}\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-manila-share\n+ echo 'Stopping openstack-manila-share'\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource disable openstack-manila-share\n+ break\n+ echo 'Checking pacemaker OpenStack services'\n+ for i in {1..3}\n+ SSH_CMD=CONTROLLER1_SSH\n+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'\n+ echo 'Using controller 1 to run pacemaker commands'\n+ for resource in ${PacemakerResourcesToStop[*]}\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-volume\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource status openstack-cinder-volume\n+ grep Started\n+ echo 'OK: Service openstack-cinder-volume is stopped'\n+ for resource in ${PacemakerResourcesToStop[*]}\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-backup\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource status openstack-cinder-backup\n+ grep Started\n+ echo 'ERROR: Service openstack-cinder-backup is started'\n+ for resource in ${PacemakerResourcesToStop[*]}\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-manila-share\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource status openstack-manila-share\n+ grep Started\n+ echo 'ERROR: Service openstack-manila-share is started'\n+ break", "stderr_lines": ["+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ ServicesToStop=(\"tripleo_aodh_api.service\" \"tripleo_aodh_api_cron.service\" \"tripleo_aodh_evaluator.service\" \"tripleo_aodh_listener.service\" \"tripleo_aodh_notifier.service\" \"tripleo_ceilometer_agent_central.service\" \"tripleo_ceilometer_agent_notification.service\" \"tripleo_octavia_api.service\" \"tripleo_octavia_health_manager.service\" \"tripleo_octavia_rsyslog.service\" \"tripleo_octavia_driver_agent.service\" \"tripleo_octavia_housekeeping.service\" \"tripleo_octavia_worker.service\" \"tripleo_horizon.service\" \"tripleo_keystone.service\" \"tripleo_barbican_api.service\" \"tripleo_barbican_worker.service\" \"tripleo_barbican_keystone_listener.service\" \"tripleo_cinder_api.service\" \"tripleo_cinder_api_cron.service\" \"tripleo_cinder_scheduler.service\" \"tripleo_cinder_volume.service\" \"tripleo_cinder_backup.service\" \"tripleo_collectd.service\" 
\"tripleo_glance_api.service\" \"tripleo_gnocchi_api.service\" \"tripleo_gnocchi_metricd.service\" \"tripleo_gnocchi_statsd.service\" \"tripleo_manila_api.service\" \"tripleo_manila_api_cron.service\" \"tripleo_manila_scheduler.service\" \"tripleo_neutron_api.service\" \"tripleo_placement_api.service\" \"tripleo_nova_api_cron.service\" \"tripleo_nova_api.service\" \"tripleo_nova_conductor.service\" \"tripleo_nova_metadata.service\" \"tripleo_nova_scheduler.service\" \"tripleo_nova_vnc_proxy.service\" \"tripleo_aodh_api.service\" \"tripleo_aodh_api_cron.service\" \"tripleo_aodh_evaluator.service\" \"tripleo_aodh_listener.service\" \"tripleo_aodh_notifier.service\" \"tripleo_ceilometer_agent_central.service\" \"tripleo_ceilometer_agent_compute.service\" \"tripleo_ceilometer_agent_ipmi.service\" \"tripleo_ceilometer_agent_notification.service\" \"tripleo_ovn_cluster_northd.service\" \"tripleo_ironic_neutron_agent.service\" \"tripleo_ironic_api.service\" \"tripleo_ironic_inspector.service\" \"tripleo_ironic_conductor.service\" \"tripleo_ironic_inspector_dnsmasq.service\" \"tripleo_ironic_pxe_http.service\" \"tripleo_ironic_pxe_tftp.service\")", "+ PacemakerResourcesToStop=(\"openstack-cinder-volume\" \"openstack-cinder-backup\" \"openstack-manila-share\")", "+ echo 'Stopping systemd OpenStack services'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_api.service", "+ : sudo systemctl stop tripleo_aodh_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_api.service", "+ : sudo systemctl stop tripleo_aodh_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_api_cron.service", "+ : sudo systemctl stop tripleo_aodh_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_api_cron.service", "+ : sudo systemctl stop tripleo_aodh_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_evaluator.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_evaluator.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_evaluator.service", "+ : sudo systemctl stop tripleo_aodh_evaluator.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_evaluator.service", "+ : sudo systemctl stop tripleo_aodh_evaluator.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_listener.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_listener.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_listener.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_listener.service", "+ : sudo systemctl stop tripleo_aodh_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_listener.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_listener.service", "+ : sudo systemctl stop tripleo_aodh_listener.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_notifier.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_notifier.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_aodh_notifier.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_notifier.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_notifier.service", "+ : sudo systemctl stop tripleo_aodh_notifier.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_notifier.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_notifier.service", "+ : sudo systemctl stop tripleo_aodh_notifier.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_central.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_central.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 2'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_central.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 3'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_central.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_notification.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_notification.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 2'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 3'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_octavia_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_octavia_api.service", "+ : sudo systemctl stop tripleo_octavia_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_octavia_api.service", "+ : sudo systemctl stop tripleo_octavia_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_health_manager.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_health_manager.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 2'", "+ : sudo systemctl is-active tripleo_octavia_health_manager.service", "+ : sudo systemctl stop tripleo_octavia_health_manager.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_octavia_health_manager.service in controller 3'", "+ : sudo systemctl is-active tripleo_octavia_health_manager.service", "+ : sudo systemctl stop tripleo_octavia_health_manager.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_rsyslog.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_rsyslog.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 2'", "+ : sudo systemctl is-active tripleo_octavia_rsyslog.service", "+ : sudo systemctl stop tripleo_octavia_rsyslog.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_rsyslog.service in controller 3'", "+ : sudo systemctl is-active tripleo_octavia_rsyslog.service", "+ : sudo systemctl stop tripleo_octavia_rsyslog.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_driver_agent.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_driver_agent.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 2'", "+ : sudo systemctl is-active tripleo_octavia_driver_agent.service", "+ : sudo systemctl stop tripleo_octavia_driver_agent.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_driver_agent.service in controller 3'", "+ : sudo systemctl is-active tripleo_octavia_driver_agent.service", "+ : sudo systemctl stop tripleo_octavia_driver_agent.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_housekeeping.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_housekeeping.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 2'", "+ : sudo systemctl is-active tripleo_octavia_housekeeping.service", "+ : sudo systemctl stop tripleo_octavia_housekeeping.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_housekeeping.service in controller 3'", "+ : sudo systemctl is-active tripleo_octavia_housekeeping.service", "+ : sudo systemctl stop tripleo_octavia_housekeeping.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_octavia_worker.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_octavia_worker.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_octavia_worker.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_worker.service in controller 2'", "+ : sudo systemctl is-active tripleo_octavia_worker.service", "+ : sudo systemctl stop tripleo_octavia_worker.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_octavia_worker.service in controller 3'", "+ : sudo systemctl is-active tripleo_octavia_worker.service", "+ : sudo systemctl stop tripleo_octavia_worker.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_horizon.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_horizon.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_horizon.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_horizon.service in controller 2'", "+ : sudo systemctl is-active tripleo_horizon.service", "+ : sudo systemctl stop tripleo_horizon.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_horizon.service in controller 3'", "+ : sudo systemctl is-active tripleo_horizon.service", "+ : sudo systemctl stop tripleo_horizon.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_keystone.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_keystone.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_keystone.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_keystone.service in controller 2'", "+ : sudo systemctl is-active tripleo_keystone.service", "+ : sudo systemctl stop tripleo_keystone.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_keystone.service in controller 3'", "+ : sudo systemctl is-active tripleo_keystone.service", "+ : sudo systemctl stop tripleo_keystone.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_barbican_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_barbican_api.service", "+ : sudo systemctl stop tripleo_barbican_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_barbican_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_barbican_api.service", "+ : sudo systemctl stop tripleo_barbican_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_barbican_worker.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_worker.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_worker.service in controller 2'", "+ : sudo systemctl is-active tripleo_barbican_worker.service", "+ : sudo systemctl stop tripleo_barbican_worker.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_worker.service in controller 3'", "+ : sudo systemctl is-active tripleo_barbican_worker.service", "+ : sudo systemctl stop tripleo_barbican_worker.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_barbican_keystone_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 2'", "+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service", "+ : sudo systemctl stop tripleo_barbican_keystone_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_barbican_keystone_listener.service in controller 3'", "+ : sudo systemctl is-active tripleo_barbican_keystone_listener.service", "+ : sudo systemctl stop tripleo_barbican_keystone_listener.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_api.service", "+ : sudo systemctl stop tripleo_cinder_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_api.service", "+ : sudo systemctl stop tripleo_cinder_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_api_cron.service", "+ : sudo systemctl stop tripleo_cinder_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_api_cron.service", "+ : sudo systemctl stop tripleo_cinder_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_scheduler.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_cinder_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_scheduler.service", "+ : sudo systemctl stop tripleo_cinder_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_scheduler.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_scheduler.service", "+ : sudo systemctl stop tripleo_cinder_scheduler.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_volume.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_volume.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_volume.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_volume.service", "+ : sudo systemctl stop tripleo_cinder_volume.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_volume.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_volume.service", "+ : sudo systemctl stop tripleo_cinder_volume.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_cinder_backup.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_cinder_backup.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_backup.service in controller 2'", "+ : sudo systemctl is-active tripleo_cinder_backup.service", "+ : sudo systemctl stop tripleo_cinder_backup.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_cinder_backup.service in controller 3'", "+ : sudo systemctl is-active tripleo_cinder_backup.service", "+ : sudo systemctl stop tripleo_cinder_backup.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_collectd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_collectd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_collectd.service in controller 2'", "+ : sudo systemctl is-active tripleo_collectd.service", "+ : sudo systemctl stop tripleo_collectd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_collectd.service in controller 3'", "+ : sudo systemctl is-active tripleo_collectd.service", "+ : sudo systemctl stop tripleo_collectd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_glance_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_glance_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_glance_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_glance_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_glance_api.service", "+ : sudo systemctl stop tripleo_glance_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_glance_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_glance_api.service", "+ : sudo systemctl stop tripleo_glance_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_gnocchi_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_gnocchi_api.service", "+ : sudo systemctl stop tripleo_gnocchi_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_gnocchi_api.service", "+ : sudo systemctl stop tripleo_gnocchi_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_metricd.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_metricd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 2'", "+ : sudo systemctl is-active tripleo_gnocchi_metricd.service", "+ : sudo systemctl stop tripleo_gnocchi_metricd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_gnocchi_metricd.service in controller 3'", "+ : sudo systemctl is-active tripleo_gnocchi_metricd.service", "+ : sudo systemctl stop tripleo_gnocchi_metricd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_gnocchi_statsd.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_gnocchi_statsd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 2'", "+ : sudo systemctl is-active tripleo_gnocchi_statsd.service", "+ : sudo systemctl stop tripleo_gnocchi_statsd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_gnocchi_statsd.service in controller 3'", "+ : sudo systemctl is-active tripleo_gnocchi_statsd.service", "+ : sudo systemctl stop tripleo_gnocchi_statsd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_manila_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_manila_api.service", "+ : sudo systemctl stop tripleo_manila_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_manila_api.service", "+ : sudo systemctl stop tripleo_manila_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_manila_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_manila_api_cron.service", "+ : sudo systemctl stop tripleo_manila_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_manila_api_cron.service", "+ : sudo systemctl stop tripleo_manila_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_manila_scheduler.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_manila_scheduler.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_manila_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_scheduler.service in controller 2'", "+ : sudo systemctl is-active tripleo_manila_scheduler.service", "+ : sudo systemctl stop tripleo_manila_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_manila_scheduler.service in controller 3'", "+ : sudo systemctl is-active tripleo_manila_scheduler.service", "+ : sudo systemctl stop tripleo_manila_scheduler.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_neutron_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_neutron_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_neutron_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_neutron_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_neutron_api.service", "+ : sudo systemctl stop tripleo_neutron_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_neutron_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_neutron_api.service", "+ : sudo systemctl stop tripleo_neutron_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_placement_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_placement_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_placement_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_placement_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_placement_api.service", "+ : sudo systemctl stop tripleo_placement_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_placement_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_placement_api.service", "+ : sudo systemctl stop tripleo_placement_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api_cron.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_nova_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_api_cron.service", "+ : sudo systemctl stop tripleo_nova_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_api_cron.service", "+ : sudo systemctl stop tripleo_nova_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_api.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_api.service", "+ : sudo systemctl stop tripleo_nova_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_api.service", "+ : sudo systemctl stop tripleo_nova_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_conductor.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_conductor.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_conductor.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_conductor.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_conductor.service", "+ : sudo systemctl stop tripleo_nova_conductor.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_conductor.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_conductor.service", "+ : sudo systemctl stop tripleo_nova_conductor.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_metadata.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_metadata.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_metadata.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_metadata.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_metadata.service", "+ : sudo systemctl stop tripleo_nova_metadata.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_metadata.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_metadata.service", "+ : sudo systemctl stop tripleo_nova_metadata.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_scheduler.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_scheduler.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_scheduler.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_scheduler.service", "+ : sudo systemctl stop tripleo_nova_scheduler.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_scheduler.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_scheduler.service", "+ : sudo systemctl stop tripleo_nova_scheduler.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_nova_vnc_proxy.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_nova_vnc_proxy.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_nova_vnc_proxy.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_vnc_proxy.service in controller 2'", "+ : sudo systemctl is-active tripleo_nova_vnc_proxy.service", "+ : sudo systemctl stop tripleo_nova_vnc_proxy.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_nova_vnc_proxy.service in controller 3'", "+ : sudo systemctl is-active tripleo_nova_vnc_proxy.service", "+ : sudo systemctl stop tripleo_nova_vnc_proxy.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_api.service", "+ : sudo systemctl stop tripleo_aodh_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_api.service", "+ : sudo systemctl stop tripleo_aodh_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_api_cron.service", "+ : sudo systemctl stop tripleo_aodh_api_cron.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_aodh_api_cron.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_api_cron.service", "+ : sudo systemctl stop tripleo_aodh_api_cron.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_evaluator.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_evaluator.service", "+ : sudo systemctl stop tripleo_aodh_evaluator.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_evaluator.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_evaluator.service", "+ : sudo systemctl stop tripleo_aodh_evaluator.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_listener.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_listener.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_listener.service", "+ : sudo systemctl stop tripleo_aodh_listener.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_listener.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_listener.service", "+ : sudo systemctl stop tripleo_aodh_listener.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_aodh_notifier.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_aodh_notifier.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_notifier.service in controller 2'", "+ : sudo systemctl is-active tripleo_aodh_notifier.service", "+ : sudo systemctl stop tripleo_aodh_notifier.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_aodh_notifier.service in controller 3'", "+ : sudo systemctl is-active tripleo_aodh_notifier.service", "+ : sudo systemctl stop tripleo_aodh_notifier.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_central.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 2'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_central.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_central.service in controller 3'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_central.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_central.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_compute.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_compute.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ceilometer_agent_compute.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_compute.service in controller 2'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_compute.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_compute.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_compute.service in controller 3'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_compute.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_compute.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_ipmi.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_ipmi.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_ipmi.service in controller 2'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_ipmi.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_ipmi.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_ipmi.service in controller 3'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_ipmi.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_ipmi.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ceilometer_agent_notification.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 2'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ceilometer_agent_notification.service in controller 3'", "+ : sudo systemctl is-active tripleo_ceilometer_agent_notification.service", "+ : sudo systemctl stop tripleo_ceilometer_agent_notification.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ovn_cluster_northd.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ovn_cluster_northd.service", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl stop tripleo_ovn_cluster_northd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ovn_cluster_northd.service in controller 2'", "+ : sudo systemctl is-active tripleo_ovn_cluster_northd.service", "+ : sudo systemctl stop tripleo_ovn_cluster_northd.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ovn_cluster_northd.service in controller 3'", "+ : sudo systemctl is-active tripleo_ovn_cluster_northd.service", "+ : sudo systemctl stop tripleo_ovn_cluster_northd.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_neutron_agent.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_neutron_agent.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_neutron_agent.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_neutron_agent.service", "+ : sudo systemctl stop tripleo_ironic_neutron_agent.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_neutron_agent.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_neutron_agent.service", "+ : sudo systemctl stop tripleo_ironic_neutron_agent.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_api.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_api.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_api.service", "+ : sudo systemctl stop tripleo_ironic_api.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_api.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_api.service", "+ : sudo systemctl stop tripleo_ironic_api.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_inspector.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_inspector.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_inspector.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_inspector.service", "+ : sudo systemctl stop tripleo_ironic_inspector.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_ironic_inspector.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_inspector.service", "+ : sudo systemctl stop tripleo_ironic_inspector.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_conductor.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_conductor.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_conductor.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_conductor.service", "+ : sudo systemctl stop tripleo_ironic_conductor.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_conductor.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_conductor.service", "+ : sudo systemctl stop tripleo_ironic_conductor.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_inspector_dnsmasq.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_inspector_dnsmasq.service", "+ : sudo systemctl stop tripleo_ironic_inspector_dnsmasq.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_inspector_dnsmasq.service", "+ : sudo systemctl stop tripleo_ironic_inspector_dnsmasq.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_pxe_http.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_pxe_http.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_pxe_http.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_pxe_http.service", "+ : sudo systemctl stop tripleo_ironic_pxe_http.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_pxe_http.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_pxe_http.service", "+ : sudo systemctl stop tripleo_ironic_pxe_http.service", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Stopping the tripleo_ironic_pxe_tftp.service in controller 1'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo systemctl is-active tripleo_ironic_pxe_tftp.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ echo 'Stopping the tripleo_ironic_pxe_tftp.service in controller 2'", "+ : sudo systemctl is-active tripleo_ironic_pxe_tftp.service", "+ : sudo systemctl stop tripleo_ironic_pxe_tftp.service", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ echo 'Stopping the tripleo_ironic_pxe_tftp.service in controller 3'", "+ : sudo systemctl is-active tripleo_ironic_pxe_tftp.service", "+ : sudo systemctl stop tripleo_ironic_pxe_tftp.service", "+ echo 'Checking systemd OpenStack services'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_api_cron.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ grep ActiveState=inactive", "+ : systemctl show tripleo_aodh_api_cron.service", "+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_evaluator.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_evaluator.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_evaluator.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_evaluator.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_listener.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_listener.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_aodh_listener.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_listener.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_notifier.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_notifier.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_notifier.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_notifier.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_central.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ceilometer_agent_central.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_central.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_central.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_notification.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_notification.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_notification.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_octavia_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_health_manager.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_octavia_health_manager.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_health_manager.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_health_manager.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_health_manager.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_health_manager.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_rsyslog.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_octavia_rsyslog.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_rsyslog.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_rsyslog.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_rsyslog.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_rsyslog.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_driver_agent.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_octavia_driver_agent.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ grep ActiveState=inactive", "+ : systemctl show tripleo_octavia_driver_agent.service", "+ echo 'ERROR: Service tripleo_octavia_driver_agent.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_octavia_driver_agent.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_driver_agent.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_housekeeping.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_octavia_housekeeping.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_housekeeping.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_housekeeping.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_housekeeping.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_housekeeping.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_octavia_worker.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_octavia_worker.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_octavia_worker.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_octavia_worker.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ grep ActiveState=inactive", "+ : systemctl show tripleo_octavia_worker.service", "+ echo 'ERROR: Service tripleo_octavia_worker.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_horizon.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_horizon.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_horizon.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_horizon.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_horizon.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_horizon.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_keystone.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_keystone.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_keystone.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_keystone.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_keystone.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_keystone.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_barbican_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_barbican_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_barbican_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_barbican_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_barbican_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_barbican_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_barbican_worker.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_barbican_worker.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_barbican_worker.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_barbican_worker.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_barbican_worker.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_barbican_worker.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_barbican_keystone_listener.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_barbican_keystone_listener.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_barbican_keystone_listener.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_barbican_keystone_listener.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_barbican_keystone_listener.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_barbican_keystone_listener.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_cinder_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_cinder_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_cinder_api_cron.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_api_cron.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_api_cron.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_cinder_scheduler.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_scheduler.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_scheduler.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_volume.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_cinder_volume.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_volume.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_volume.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_volume.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_volume.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_cinder_backup.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_cinder_backup.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_cinder_backup.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_backup.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_cinder_backup.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_cinder_backup.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ grep ActiveState=inactive", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_collectd.service", "+ echo 'OK: Service tripleo_collectd.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_collectd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_collectd.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_collectd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_collectd.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_glance_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_glance_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_glance_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_glance_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_glance_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_glance_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_gnocchi_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_gnocchi_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_gnocchi_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_gnocchi_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_gnocchi_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_gnocchi_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_gnocchi_metricd.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_gnocchi_metricd.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_gnocchi_metricd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_gnocchi_metricd.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_gnocchi_metricd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_gnocchi_metricd.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_gnocchi_statsd.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_gnocchi_statsd.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_gnocchi_statsd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_gnocchi_statsd.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_gnocchi_statsd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_gnocchi_statsd.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_manila_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_manila_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_manila_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_manila_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_manila_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_manila_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_manila_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_manila_api_cron.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_manila_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_manila_api_cron.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_manila_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_manila_api_cron.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_manila_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_manila_scheduler.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_manila_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_manila_scheduler.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_manila_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_manila_scheduler.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_neutron_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_neutron_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_neutron_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_neutron_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_neutron_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_neutron_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_placement_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_placement_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_placement_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_placement_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ grep ActiveState=inactive", "+ : systemctl show tripleo_placement_api.service", "+ echo 'ERROR: Service tripleo_placement_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_nova_api_cron.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_api_cron.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_api_cron.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_nova_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_nova_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_conductor.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_nova_conductor.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_conductor.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_conductor.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_conductor.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_conductor.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_metadata.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_nova_metadata.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_metadata.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_metadata.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_metadata.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_metadata.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_nova_scheduler.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_scheduler.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_scheduler.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_scheduler.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_nova_vnc_proxy.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_nova_vnc_proxy.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_nova_vnc_proxy.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_vnc_proxy.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_nova_vnc_proxy.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_nova_vnc_proxy.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_api_cron.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_api_cron.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_api_cron.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_evaluator.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_evaluator.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_evaluator.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_evaluator.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_evaluator.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_listener.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_listener.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_listener.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_aodh_listener.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_listener.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_aodh_notifier.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_aodh_notifier.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_notifier.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_aodh_notifier.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_aodh_notifier.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_central.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ceilometer_agent_central.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_central.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_central.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_compute.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ceilometer_agent_compute.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_compute.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_compute.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_ipmi.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ceilometer_agent_ipmi.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' 
-z : ']'", "+ : systemctl show tripleo_ceilometer_agent_ipmi.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_ipmi.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ceilometer_agent_notification.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_notification.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ceilometer_agent_notification.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ovn_cluster_northd.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ovn_cluster_northd.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ovn_cluster_northd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ovn_cluster_northd.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ovn_cluster_northd.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ovn_cluster_northd.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_neutron_agent.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_neutron_agent.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ grep ActiveState=inactive", "+ : systemctl show tripleo_ironic_neutron_agent.service", "+ echo 'ERROR: Service tripleo_ironic_neutron_agent.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_neutron_agent.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_neutron_agent.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_api.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_api.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_api.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_api.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_api.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_inspector.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_inspector.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_inspector.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_inspector.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ grep ActiveState=inactive", "+ : systemctl show tripleo_ironic_inspector.service", "+ echo 'ERROR: Service tripleo_ironic_inspector.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_conductor.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_conductor.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_conductor.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_conductor.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_conductor.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_conductor.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_inspector_dnsmasq.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_inspector_dnsmasq.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_inspector_dnsmasq.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_inspector_dnsmasq.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_pxe_http.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_pxe_http.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_pxe_http.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_pxe_http.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_pxe_http.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_pxe_http.service still running on controller 3'", "+ for service in ${ServicesToStop[*]}", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 systemctl show tripleo_ironic_pxe_tftp.service", "+ grep ActiveState=inactive", "+ echo 'OK: Service tripleo_ironic_pxe_tftp.service is not running on controller 1'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER2_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_pxe_tftp.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 2'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER3_SSH", "+ '[' '!' -z : ']'", "+ : systemctl show tripleo_ironic_pxe_tftp.service", "+ grep ActiveState=inactive", "+ echo 'ERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 3'", "+ echo 'Stopping pacemaker OpenStack services'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' -z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Using controller 1 to run pacemaker commands'", "+ for resource in ${PacemakerResourcesToStop[*]}", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-volume", "+ echo 'Stopping openstack-cinder-volume'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource disable openstack-cinder-volume", "+ for resource in ${PacemakerResourcesToStop[*]}", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-backup", "+ echo 'Stopping openstack-cinder-backup'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource disable openstack-cinder-backup", "+ for resource in ${PacemakerResourcesToStop[*]}", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-manila-share", "+ echo 'Stopping openstack-manila-share'", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource disable openstack-manila-share", "+ break", "+ echo 'Checking pacemaker OpenStack services'", "+ for i in {1..3}", "+ SSH_CMD=CONTROLLER1_SSH", "+ '[' '!' 
-z 'ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100' ']'", "+ echo 'Using controller 1 to run pacemaker commands'", "+ for resource in ${PacemakerResourcesToStop[*]}", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-volume", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource status openstack-cinder-volume", "+ grep Started", "+ echo 'OK: Service openstack-cinder-volume is stopped'", "+ for resource in ${PacemakerResourcesToStop[*]}", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-cinder-backup", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource status openstack-cinder-backup", "+ grep Started", "+ echo 'ERROR: Service openstack-cinder-backup is started'", "+ for resource in ${PacemakerResourcesToStop[*]}", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource config openstack-manila-share", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo pcs resource status openstack-manila-share", "+ grep Started", "+ echo 'ERROR: Service openstack-manila-share is started'", "+ break"], "stdout": "Stopping systemd OpenStack services\nStopping the tripleo_aodh_api.service in controller 1\nactive\nStopping the tripleo_aodh_api.service in controller 2\nStopping the tripleo_aodh_api.service in controller 3\nStopping the tripleo_aodh_api_cron.service in controller 1\nactive\nStopping the tripleo_aodh_api_cron.service in controller 2\nStopping the tripleo_aodh_api_cron.service in controller 3\nStopping the tripleo_aodh_evaluator.service in controller 1\nactive\nStopping the tripleo_aodh_evaluator.service in controller 2\nStopping the tripleo_aodh_evaluator.service in controller 3\nStopping the tripleo_aodh_listener.service in controller 1\nactive\nStopping the tripleo_aodh_listener.service in controller 2\nStopping the tripleo_aodh_listener.service in controller 3\nStopping the tripleo_aodh_notifier.service in controller 1\nactive\nStopping the tripleo_aodh_notifier.service in controller 2\nStopping the tripleo_aodh_notifier.service in controller 3\nStopping the tripleo_ceilometer_agent_central.service in controller 1\nactive\nStopping the tripleo_ceilometer_agent_central.service in controller 2\nStopping the tripleo_ceilometer_agent_central.service in controller 3\nStopping the tripleo_ceilometer_agent_notification.service in controller 1\nactive\nStopping the tripleo_ceilometer_agent_notification.service in controller 2\nStopping the tripleo_ceilometer_agent_notification.service in controller 3\nStopping the tripleo_octavia_api.service in controller 1\nactive\nStopping the tripleo_octavia_api.service in controller 2\nStopping the tripleo_octavia_api.service in controller 3\nStopping the tripleo_octavia_health_manager.service in controller 1\nactive\nStopping the tripleo_octavia_health_manager.service in controller 2\nStopping the tripleo_octavia_health_manager.service in controller 3\nStopping the tripleo_octavia_rsyslog.service in controller 1\nactive\nStopping the tripleo_octavia_rsyslog.service in controller 2\nStopping the tripleo_octavia_rsyslog.service in controller 3\nStopping the tripleo_octavia_driver_agent.service in controller 1\nactive\nStopping the tripleo_octavia_driver_agent.service in controller 2\nStopping the tripleo_octavia_driver_agent.service in controller 3\nStopping the tripleo_octavia_housekeeping.service in controller 1\nactive\nStopping the tripleo_octavia_housekeeping.service in controller 2\nStopping the 
tripleo_octavia_housekeeping.service in controller 3\nStopping the tripleo_octavia_worker.service in controller 1\nactive\nStopping the tripleo_octavia_worker.service in controller 2\nStopping the tripleo_octavia_worker.service in controller 3\nStopping the tripleo_horizon.service in controller 1\nactive\nStopping the tripleo_horizon.service in controller 2\nStopping the tripleo_horizon.service in controller 3\nStopping the tripleo_keystone.service in controller 1\nactive\nStopping the tripleo_keystone.service in controller 2\nStopping the tripleo_keystone.service in controller 3\nStopping the tripleo_barbican_api.service in controller 1\ninactive\nStopping the tripleo_barbican_api.service in controller 2\nStopping the tripleo_barbican_api.service in controller 3\nStopping the tripleo_barbican_worker.service in controller 1\ninactive\nStopping the tripleo_barbican_worker.service in controller 2\nStopping the tripleo_barbican_worker.service in controller 3\nStopping the tripleo_barbican_keystone_listener.service in controller 1\ninactive\nStopping the tripleo_barbican_keystone_listener.service in controller 2\nStopping the tripleo_barbican_keystone_listener.service in controller 3\nStopping the tripleo_cinder_api.service in controller 1\nactive\nStopping the tripleo_cinder_api.service in controller 2\nStopping the tripleo_cinder_api.service in controller 3\nStopping the tripleo_cinder_api_cron.service in controller 1\nactive\nStopping the tripleo_cinder_api_cron.service in controller 2\nStopping the tripleo_cinder_api_cron.service in controller 3\nStopping the tripleo_cinder_scheduler.service in controller 1\nactive\nStopping the tripleo_cinder_scheduler.service in controller 2\nStopping the tripleo_cinder_scheduler.service in controller 3\nStopping the tripleo_cinder_volume.service in controller 1\ninactive\nStopping the tripleo_cinder_volume.service in controller 2\nStopping the tripleo_cinder_volume.service in controller 3\nStopping the tripleo_cinder_backup.service in controller 1\ninactive\nStopping the tripleo_cinder_backup.service in controller 2\nStopping the tripleo_cinder_backup.service in controller 3\nStopping the tripleo_collectd.service in controller 1\ninactive\nStopping the tripleo_collectd.service in controller 2\nStopping the tripleo_collectd.service in controller 3\nStopping the tripleo_glance_api.service in controller 1\nactive\nStopping the tripleo_glance_api.service in controller 2\nStopping the tripleo_glance_api.service in controller 3\nStopping the tripleo_gnocchi_api.service in controller 1\nactive\nStopping the tripleo_gnocchi_api.service in controller 2\nStopping the tripleo_gnocchi_api.service in controller 3\nStopping the tripleo_gnocchi_metricd.service in controller 1\nactive\nStopping the tripleo_gnocchi_metricd.service in controller 2\nStopping the tripleo_gnocchi_metricd.service in controller 3\nStopping the tripleo_gnocchi_statsd.service in controller 1\nactive\nStopping the tripleo_gnocchi_statsd.service in controller 2\nStopping the tripleo_gnocchi_statsd.service in controller 3\nStopping the tripleo_manila_api.service in controller 1\nactive\nStopping the tripleo_manila_api.service in controller 2\nStopping the tripleo_manila_api.service in controller 3\nStopping the tripleo_manila_api_cron.service in controller 1\nactive\nStopping the tripleo_manila_api_cron.service in controller 2\nStopping the tripleo_manila_api_cron.service in controller 3\nStopping the tripleo_manila_scheduler.service in controller 1\nactive\nStopping the 
tripleo_manila_scheduler.service in controller 2\nStopping the tripleo_manila_scheduler.service in controller 3\nStopping the tripleo_neutron_api.service in controller 1\nactive\nStopping the tripleo_neutron_api.service in controller 2\nStopping the tripleo_neutron_api.service in controller 3\nStopping the tripleo_placement_api.service in controller 1\nactive\nStopping the tripleo_placement_api.service in controller 2\nStopping the tripleo_placement_api.service in controller 3\nStopping the tripleo_nova_api_cron.service in controller 1\nactive\nStopping the tripleo_nova_api_cron.service in controller 2\nStopping the tripleo_nova_api_cron.service in controller 3\nStopping the tripleo_nova_api.service in controller 1\nactive\nStopping the tripleo_nova_api.service in controller 2\nStopping the tripleo_nova_api.service in controller 3\nStopping the tripleo_nova_conductor.service in controller 1\nactive\nStopping the tripleo_nova_conductor.service in controller 2\nStopping the tripleo_nova_conductor.service in controller 3\nStopping the tripleo_nova_metadata.service in controller 1\nactive\nStopping the tripleo_nova_metadata.service in controller 2\nStopping the tripleo_nova_metadata.service in controller 3\nStopping the tripleo_nova_scheduler.service in controller 1\nactive\nStopping the tripleo_nova_scheduler.service in controller 2\nStopping the tripleo_nova_scheduler.service in controller 3\nStopping the tripleo_nova_vnc_proxy.service in controller 1\nactive\nStopping the tripleo_nova_vnc_proxy.service in controller 2\nStopping the tripleo_nova_vnc_proxy.service in controller 3\nStopping the tripleo_aodh_api.service in controller 1\ninactive\nStopping the tripleo_aodh_api.service in controller 2\nStopping the tripleo_aodh_api.service in controller 3\nStopping the tripleo_aodh_api_cron.service in controller 1\ninactive\nStopping the tripleo_aodh_api_cron.service in controller 2\nStopping the tripleo_aodh_api_cron.service in controller 3\nStopping the tripleo_aodh_evaluator.service in controller 1\ninactive\nStopping the tripleo_aodh_evaluator.service in controller 2\nStopping the tripleo_aodh_evaluator.service in controller 3\nStopping the tripleo_aodh_listener.service in controller 1\ninactive\nStopping the tripleo_aodh_listener.service in controller 2\nStopping the tripleo_aodh_listener.service in controller 3\nStopping the tripleo_aodh_notifier.service in controller 1\ninactive\nStopping the tripleo_aodh_notifier.service in controller 2\nStopping the tripleo_aodh_notifier.service in controller 3\nStopping the tripleo_ceilometer_agent_central.service in controller 1\ninactive\nStopping the tripleo_ceilometer_agent_central.service in controller 2\nStopping the tripleo_ceilometer_agent_central.service in controller 3\nStopping the tripleo_ceilometer_agent_compute.service in controller 1\nactive\nStopping the tripleo_ceilometer_agent_compute.service in controller 2\nStopping the tripleo_ceilometer_agent_compute.service in controller 3\nStopping the tripleo_ceilometer_agent_ipmi.service in controller 1\ninactive\nStopping the tripleo_ceilometer_agent_ipmi.service in controller 2\nStopping the tripleo_ceilometer_agent_ipmi.service in controller 3\nStopping the tripleo_ceilometer_agent_notification.service in controller 1\ninactive\nStopping the tripleo_ceilometer_agent_notification.service in controller 2\nStopping the tripleo_ceilometer_agent_notification.service in controller 3\nStopping the tripleo_ovn_cluster_northd.service in controller 1\nactive\nStopping the 
tripleo_ovn_cluster_northd.service in controller 2\nStopping the tripleo_ovn_cluster_northd.service in controller 3\nStopping the tripleo_ironic_neutron_agent.service in controller 1\ninactive\nStopping the tripleo_ironic_neutron_agent.service in controller 2\nStopping the tripleo_ironic_neutron_agent.service in controller 3\nStopping the tripleo_ironic_api.service in controller 1\ninactive\nStopping the tripleo_ironic_api.service in controller 2\nStopping the tripleo_ironic_api.service in controller 3\nStopping the tripleo_ironic_inspector.service in controller 1\ninactive\nStopping the tripleo_ironic_inspector.service in controller 2\nStopping the tripleo_ironic_inspector.service in controller 3\nStopping the tripleo_ironic_conductor.service in controller 1\ninactive\nStopping the tripleo_ironic_conductor.service in controller 2\nStopping the tripleo_ironic_conductor.service in controller 3\nStopping the tripleo_ironic_inspector_dnsmasq.service in controller 1\ninactive\nStopping the tripleo_ironic_inspector_dnsmasq.service in controller 2\nStopping the tripleo_ironic_inspector_dnsmasq.service in controller 3\nStopping the tripleo_ironic_pxe_http.service in controller 1\ninactive\nStopping the tripleo_ironic_pxe_http.service in controller 2\nStopping the tripleo_ironic_pxe_http.service in controller 3\nStopping the tripleo_ironic_pxe_tftp.service in controller 1\ninactive\nStopping the tripleo_ironic_pxe_tftp.service in controller 2\nStopping the tripleo_ironic_pxe_tftp.service in controller 3\nChecking systemd OpenStack services\nOK: Service tripleo_aodh_api.service is not running on controller 1\nERROR: Service tripleo_aodh_api.service still running on controller 2\nERROR: Service tripleo_aodh_api.service still running on controller 3\nOK: Service tripleo_aodh_api_cron.service is not running on controller 1\nERROR: Service tripleo_aodh_api_cron.service still running on controller 2\nERROR: Service tripleo_aodh_api_cron.service still running on controller 3\nOK: Service tripleo_aodh_evaluator.service is not running on controller 1\nERROR: Service tripleo_aodh_evaluator.service still running on controller 2\nERROR: Service tripleo_aodh_evaluator.service still running on controller 3\nOK: Service tripleo_aodh_listener.service is not running on controller 1\nERROR: Service tripleo_aodh_listener.service still running on controller 2\nERROR: Service tripleo_aodh_listener.service still running on controller 3\nOK: Service tripleo_aodh_notifier.service is not running on controller 1\nERROR: Service tripleo_aodh_notifier.service still running on controller 2\nERROR: Service tripleo_aodh_notifier.service still running on controller 3\nOK: Service tripleo_ceilometer_agent_central.service is not running on controller 1\nERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2\nERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3\nOK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1\nERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2\nERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3\nOK: Service tripleo_octavia_api.service is not running on controller 1\nERROR: Service tripleo_octavia_api.service still running on controller 2\nERROR: Service tripleo_octavia_api.service still running on controller 3\nOK: Service tripleo_octavia_health_manager.service is not running on controller 1\nERROR: Service 
tripleo_octavia_health_manager.service still running on controller 2\nERROR: Service tripleo_octavia_health_manager.service still running on controller 3\nOK: Service tripleo_octavia_rsyslog.service is not running on controller 1\nERROR: Service tripleo_octavia_rsyslog.service still running on controller 2\nERROR: Service tripleo_octavia_rsyslog.service still running on controller 3\nOK: Service tripleo_octavia_driver_agent.service is not running on controller 1\nERROR: Service tripleo_octavia_driver_agent.service still running on controller 2\nERROR: Service tripleo_octavia_driver_agent.service still running on controller 3\nOK: Service tripleo_octavia_housekeeping.service is not running on controller 1\nERROR: Service tripleo_octavia_housekeeping.service still running on controller 2\nERROR: Service tripleo_octavia_housekeeping.service still running on controller 3\nOK: Service tripleo_octavia_worker.service is not running on controller 1\nERROR: Service tripleo_octavia_worker.service still running on controller 2\nERROR: Service tripleo_octavia_worker.service still running on controller 3\nOK: Service tripleo_horizon.service is not running on controller 1\nERROR: Service tripleo_horizon.service still running on controller 2\nERROR: Service tripleo_horizon.service still running on controller 3\nOK: Service tripleo_keystone.service is not running on controller 1\nERROR: Service tripleo_keystone.service still running on controller 2\nERROR: Service tripleo_keystone.service still running on controller 3\nOK: Service tripleo_barbican_api.service is not running on controller 1\nERROR: Service tripleo_barbican_api.service still running on controller 2\nERROR: Service tripleo_barbican_api.service still running on controller 3\nOK: Service tripleo_barbican_worker.service is not running on controller 1\nERROR: Service tripleo_barbican_worker.service still running on controller 2\nERROR: Service tripleo_barbican_worker.service still running on controller 3\nOK: Service tripleo_barbican_keystone_listener.service is not running on controller 1\nERROR: Service tripleo_barbican_keystone_listener.service still running on controller 2\nERROR: Service tripleo_barbican_keystone_listener.service still running on controller 3\nOK: Service tripleo_cinder_api.service is not running on controller 1\nERROR: Service tripleo_cinder_api.service still running on controller 2\nERROR: Service tripleo_cinder_api.service still running on controller 3\nOK: Service tripleo_cinder_api_cron.service is not running on controller 1\nERROR: Service tripleo_cinder_api_cron.service still running on controller 2\nERROR: Service tripleo_cinder_api_cron.service still running on controller 3\nOK: Service tripleo_cinder_scheduler.service is not running on controller 1\nERROR: Service tripleo_cinder_scheduler.service still running on controller 2\nERROR: Service tripleo_cinder_scheduler.service still running on controller 3\nOK: Service tripleo_cinder_volume.service is not running on controller 1\nERROR: Service tripleo_cinder_volume.service still running on controller 2\nERROR: Service tripleo_cinder_volume.service still running on controller 3\nOK: Service tripleo_cinder_backup.service is not running on controller 1\nERROR: Service tripleo_cinder_backup.service still running on controller 2\nERROR: Service tripleo_cinder_backup.service still running on controller 3\nOK: Service tripleo_collectd.service is not running on controller 1\nERROR: Service tripleo_collectd.service still running on controller 2\nERROR: Service 
tripleo_collectd.service still running on controller 3\nOK: Service tripleo_glance_api.service is not running on controller 1\nERROR: Service tripleo_glance_api.service still running on controller 2\nERROR: Service tripleo_glance_api.service still running on controller 3\nOK: Service tripleo_gnocchi_api.service is not running on controller 1\nERROR: Service tripleo_gnocchi_api.service still running on controller 2\nERROR: Service tripleo_gnocchi_api.service still running on controller 3\nOK: Service tripleo_gnocchi_metricd.service is not running on controller 1\nERROR: Service tripleo_gnocchi_metricd.service still running on controller 2\nERROR: Service tripleo_gnocchi_metricd.service still running on controller 3\nOK: Service tripleo_gnocchi_statsd.service is not running on controller 1\nERROR: Service tripleo_gnocchi_statsd.service still running on controller 2\nERROR: Service tripleo_gnocchi_statsd.service still running on controller 3\nOK: Service tripleo_manila_api.service is not running on controller 1\nERROR: Service tripleo_manila_api.service still running on controller 2\nERROR: Service tripleo_manila_api.service still running on controller 3\nOK: Service tripleo_manila_api_cron.service is not running on controller 1\nERROR: Service tripleo_manila_api_cron.service still running on controller 2\nERROR: Service tripleo_manila_api_cron.service still running on controller 3\nOK: Service tripleo_manila_scheduler.service is not running on controller 1\nERROR: Service tripleo_manila_scheduler.service still running on controller 2\nERROR: Service tripleo_manila_scheduler.service still running on controller 3\nOK: Service tripleo_neutron_api.service is not running on controller 1\nERROR: Service tripleo_neutron_api.service still running on controller 2\nERROR: Service tripleo_neutron_api.service still running on controller 3\nOK: Service tripleo_placement_api.service is not running on controller 1\nERROR: Service tripleo_placement_api.service still running on controller 2\nERROR: Service tripleo_placement_api.service still running on controller 3\nOK: Service tripleo_nova_api_cron.service is not running on controller 1\nERROR: Service tripleo_nova_api_cron.service still running on controller 2\nERROR: Service tripleo_nova_api_cron.service still running on controller 3\nOK: Service tripleo_nova_api.service is not running on controller 1\nERROR: Service tripleo_nova_api.service still running on controller 2\nERROR: Service tripleo_nova_api.service still running on controller 3\nOK: Service tripleo_nova_conductor.service is not running on controller 1\nERROR: Service tripleo_nova_conductor.service still running on controller 2\nERROR: Service tripleo_nova_conductor.service still running on controller 3\nOK: Service tripleo_nova_metadata.service is not running on controller 1\nERROR: Service tripleo_nova_metadata.service still running on controller 2\nERROR: Service tripleo_nova_metadata.service still running on controller 3\nOK: Service tripleo_nova_scheduler.service is not running on controller 1\nERROR: Service tripleo_nova_scheduler.service still running on controller 2\nERROR: Service tripleo_nova_scheduler.service still running on controller 3\nOK: Service tripleo_nova_vnc_proxy.service is not running on controller 1\nERROR: Service tripleo_nova_vnc_proxy.service still running on controller 2\nERROR: Service tripleo_nova_vnc_proxy.service still running on controller 3\nOK: Service tripleo_aodh_api.service is not running on controller 1\nERROR: Service tripleo_aodh_api.service still 
running on controller 2\nERROR: Service tripleo_aodh_api.service still running on controller 3\nOK: Service tripleo_aodh_api_cron.service is not running on controller 1\nERROR: Service tripleo_aodh_api_cron.service still running on controller 2\nERROR: Service tripleo_aodh_api_cron.service still running on controller 3\nOK: Service tripleo_aodh_evaluator.service is not running on controller 1\nERROR: Service tripleo_aodh_evaluator.service still running on controller 2\nERROR: Service tripleo_aodh_evaluator.service still running on controller 3\nOK: Service tripleo_aodh_listener.service is not running on controller 1\nERROR: Service tripleo_aodh_listener.service still running on controller 2\nERROR: Service tripleo_aodh_listener.service still running on controller 3\nOK: Service tripleo_aodh_notifier.service is not running on controller 1\nERROR: Service tripleo_aodh_notifier.service still running on controller 2\nERROR: Service tripleo_aodh_notifier.service still running on controller 3\nOK: Service tripleo_ceilometer_agent_central.service is not running on controller 1\nERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2\nERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3\nOK: Service tripleo_ceilometer_agent_compute.service is not running on controller 1\nERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 2\nERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 3\nOK: Service tripleo_ceilometer_agent_ipmi.service is not running on controller 1\nERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 2\nERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 3\nOK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1\nERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2\nERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3\nOK: Service tripleo_ovn_cluster_northd.service is not running on controller 1\nERROR: Service tripleo_ovn_cluster_northd.service still running on controller 2\nERROR: Service tripleo_ovn_cluster_northd.service still running on controller 3\nOK: Service tripleo_ironic_neutron_agent.service is not running on controller 1\nERROR: Service tripleo_ironic_neutron_agent.service still running on controller 2\nERROR: Service tripleo_ironic_neutron_agent.service still running on controller 3\nOK: Service tripleo_ironic_api.service is not running on controller 1\nERROR: Service tripleo_ironic_api.service still running on controller 2\nERROR: Service tripleo_ironic_api.service still running on controller 3\nOK: Service tripleo_ironic_inspector.service is not running on controller 1\nERROR: Service tripleo_ironic_inspector.service still running on controller 2\nERROR: Service tripleo_ironic_inspector.service still running on controller 3\nOK: Service tripleo_ironic_conductor.service is not running on controller 1\nERROR: Service tripleo_ironic_conductor.service still running on controller 2\nERROR: Service tripleo_ironic_conductor.service still running on controller 3\nOK: Service tripleo_ironic_inspector_dnsmasq.service is not running on controller 1\nERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 2\nERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 3\nOK: Service tripleo_ironic_pxe_http.service is not running on controller 
1\nERROR: Service tripleo_ironic_pxe_http.service still running on controller 2\nERROR: Service tripleo_ironic_pxe_http.service still running on controller 3\nOK: Service tripleo_ironic_pxe_tftp.service is not running on controller 1\nERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 2\nERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 3\nStopping pacemaker OpenStack services\nUsing controller 1 to run pacemaker commands\nStopping openstack-cinder-volume\nStopping openstack-cinder-backup\nStopping openstack-manila-share\nChecking pacemaker OpenStack services\nUsing controller 1 to run pacemaker commands\nOK: Service openstack-cinder-volume is stopped\n * openstack-cinder-backup-podman-0\t(ocf:heartbeat:podman):\t Started standalone (disabled)\nERROR: Service openstack-cinder-backup is started\n * openstack-manila-share-podman-0\t(ocf:heartbeat:podman):\t Started standalone (disabled)\nERROR: Service openstack-manila-share is started", "stdout_lines": ["Stopping systemd OpenStack services", "Stopping the tripleo_aodh_api.service in controller 1", "active", "Stopping the tripleo_aodh_api.service in controller 2", "Stopping the tripleo_aodh_api.service in controller 3", "Stopping the tripleo_aodh_api_cron.service in controller 1", "active", "Stopping the tripleo_aodh_api_cron.service in controller 2", "Stopping the tripleo_aodh_api_cron.service in controller 3", "Stopping the tripleo_aodh_evaluator.service in controller 1", "active", "Stopping the tripleo_aodh_evaluator.service in controller 2", "Stopping the tripleo_aodh_evaluator.service in controller 3", "Stopping the tripleo_aodh_listener.service in controller 1", "active", "Stopping the tripleo_aodh_listener.service in controller 2", "Stopping the tripleo_aodh_listener.service in controller 3", "Stopping the tripleo_aodh_notifier.service in controller 1", "active", "Stopping the tripleo_aodh_notifier.service in controller 2", "Stopping the tripleo_aodh_notifier.service in controller 3", "Stopping the tripleo_ceilometer_agent_central.service in controller 1", "active", "Stopping the tripleo_ceilometer_agent_central.service in controller 2", "Stopping the tripleo_ceilometer_agent_central.service in controller 3", "Stopping the tripleo_ceilometer_agent_notification.service in controller 1", "active", "Stopping the tripleo_ceilometer_agent_notification.service in controller 2", "Stopping the tripleo_ceilometer_agent_notification.service in controller 3", "Stopping the tripleo_octavia_api.service in controller 1", "active", "Stopping the tripleo_octavia_api.service in controller 2", "Stopping the tripleo_octavia_api.service in controller 3", "Stopping the tripleo_octavia_health_manager.service in controller 1", "active", "Stopping the tripleo_octavia_health_manager.service in controller 2", "Stopping the tripleo_octavia_health_manager.service in controller 3", "Stopping the tripleo_octavia_rsyslog.service in controller 1", "active", "Stopping the tripleo_octavia_rsyslog.service in controller 2", "Stopping the tripleo_octavia_rsyslog.service in controller 3", "Stopping the tripleo_octavia_driver_agent.service in controller 1", "active", "Stopping the tripleo_octavia_driver_agent.service in controller 2", "Stopping the tripleo_octavia_driver_agent.service in controller 3", "Stopping the tripleo_octavia_housekeeping.service in controller 1", "active", "Stopping the tripleo_octavia_housekeeping.service in controller 2", "Stopping the tripleo_octavia_housekeeping.service in controller 3", "Stopping 
the tripleo_octavia_worker.service in controller 1", "active", "Stopping the tripleo_octavia_worker.service in controller 2", "Stopping the tripleo_octavia_worker.service in controller 3", "Stopping the tripleo_horizon.service in controller 1", "active", "Stopping the tripleo_horizon.service in controller 2", "Stopping the tripleo_horizon.service in controller 3", "Stopping the tripleo_keystone.service in controller 1", "active", "Stopping the tripleo_keystone.service in controller 2", "Stopping the tripleo_keystone.service in controller 3", "Stopping the tripleo_barbican_api.service in controller 1", "inactive", "Stopping the tripleo_barbican_api.service in controller 2", "Stopping the tripleo_barbican_api.service in controller 3", "Stopping the tripleo_barbican_worker.service in controller 1", "inactive", "Stopping the tripleo_barbican_worker.service in controller 2", "Stopping the tripleo_barbican_worker.service in controller 3", "Stopping the tripleo_barbican_keystone_listener.service in controller 1", "inactive", "Stopping the tripleo_barbican_keystone_listener.service in controller 2", "Stopping the tripleo_barbican_keystone_listener.service in controller 3", "Stopping the tripleo_cinder_api.service in controller 1", "active", "Stopping the tripleo_cinder_api.service in controller 2", "Stopping the tripleo_cinder_api.service in controller 3", "Stopping the tripleo_cinder_api_cron.service in controller 1", "active", "Stopping the tripleo_cinder_api_cron.service in controller 2", "Stopping the tripleo_cinder_api_cron.service in controller 3", "Stopping the tripleo_cinder_scheduler.service in controller 1", "active", "Stopping the tripleo_cinder_scheduler.service in controller 2", "Stopping the tripleo_cinder_scheduler.service in controller 3", "Stopping the tripleo_cinder_volume.service in controller 1", "inactive", "Stopping the tripleo_cinder_volume.service in controller 2", "Stopping the tripleo_cinder_volume.service in controller 3", "Stopping the tripleo_cinder_backup.service in controller 1", "inactive", "Stopping the tripleo_cinder_backup.service in controller 2", "Stopping the tripleo_cinder_backup.service in controller 3", "Stopping the tripleo_collectd.service in controller 1", "inactive", "Stopping the tripleo_collectd.service in controller 2", "Stopping the tripleo_collectd.service in controller 3", "Stopping the tripleo_glance_api.service in controller 1", "active", "Stopping the tripleo_glance_api.service in controller 2", "Stopping the tripleo_glance_api.service in controller 3", "Stopping the tripleo_gnocchi_api.service in controller 1", "active", "Stopping the tripleo_gnocchi_api.service in controller 2", "Stopping the tripleo_gnocchi_api.service in controller 3", "Stopping the tripleo_gnocchi_metricd.service in controller 1", "active", "Stopping the tripleo_gnocchi_metricd.service in controller 2", "Stopping the tripleo_gnocchi_metricd.service in controller 3", "Stopping the tripleo_gnocchi_statsd.service in controller 1", "active", "Stopping the tripleo_gnocchi_statsd.service in controller 2", "Stopping the tripleo_gnocchi_statsd.service in controller 3", "Stopping the tripleo_manila_api.service in controller 1", "active", "Stopping the tripleo_manila_api.service in controller 2", "Stopping the tripleo_manila_api.service in controller 3", "Stopping the tripleo_manila_api_cron.service in controller 1", "active", "Stopping the tripleo_manila_api_cron.service in controller 2", "Stopping the tripleo_manila_api_cron.service in controller 3", "Stopping the 
tripleo_manila_scheduler.service in controller 1", "active", "Stopping the tripleo_manila_scheduler.service in controller 2", "Stopping the tripleo_manila_scheduler.service in controller 3", "Stopping the tripleo_neutron_api.service in controller 1", "active", "Stopping the tripleo_neutron_api.service in controller 2", "Stopping the tripleo_neutron_api.service in controller 3", "Stopping the tripleo_placement_api.service in controller 1", "active", "Stopping the tripleo_placement_api.service in controller 2", "Stopping the tripleo_placement_api.service in controller 3", "Stopping the tripleo_nova_api_cron.service in controller 1", "active", "Stopping the tripleo_nova_api_cron.service in controller 2", "Stopping the tripleo_nova_api_cron.service in controller 3", "Stopping the tripleo_nova_api.service in controller 1", "active", "Stopping the tripleo_nova_api.service in controller 2", "Stopping the tripleo_nova_api.service in controller 3", "Stopping the tripleo_nova_conductor.service in controller 1", "active", "Stopping the tripleo_nova_conductor.service in controller 2", "Stopping the tripleo_nova_conductor.service in controller 3", "Stopping the tripleo_nova_metadata.service in controller 1", "active", "Stopping the tripleo_nova_metadata.service in controller 2", "Stopping the tripleo_nova_metadata.service in controller 3", "Stopping the tripleo_nova_scheduler.service in controller 1", "active", "Stopping the tripleo_nova_scheduler.service in controller 2", "Stopping the tripleo_nova_scheduler.service in controller 3", "Stopping the tripleo_nova_vnc_proxy.service in controller 1", "active", "Stopping the tripleo_nova_vnc_proxy.service in controller 2", "Stopping the tripleo_nova_vnc_proxy.service in controller 3", "Stopping the tripleo_aodh_api.service in controller 1", "inactive", "Stopping the tripleo_aodh_api.service in controller 2", "Stopping the tripleo_aodh_api.service in controller 3", "Stopping the tripleo_aodh_api_cron.service in controller 1", "inactive", "Stopping the tripleo_aodh_api_cron.service in controller 2", "Stopping the tripleo_aodh_api_cron.service in controller 3", "Stopping the tripleo_aodh_evaluator.service in controller 1", "inactive", "Stopping the tripleo_aodh_evaluator.service in controller 2", "Stopping the tripleo_aodh_evaluator.service in controller 3", "Stopping the tripleo_aodh_listener.service in controller 1", "inactive", "Stopping the tripleo_aodh_listener.service in controller 2", "Stopping the tripleo_aodh_listener.service in controller 3", "Stopping the tripleo_aodh_notifier.service in controller 1", "inactive", "Stopping the tripleo_aodh_notifier.service in controller 2", "Stopping the tripleo_aodh_notifier.service in controller 3", "Stopping the tripleo_ceilometer_agent_central.service in controller 1", "inactive", "Stopping the tripleo_ceilometer_agent_central.service in controller 2", "Stopping the tripleo_ceilometer_agent_central.service in controller 3", "Stopping the tripleo_ceilometer_agent_compute.service in controller 1", "active", "Stopping the tripleo_ceilometer_agent_compute.service in controller 2", "Stopping the tripleo_ceilometer_agent_compute.service in controller 3", "Stopping the tripleo_ceilometer_agent_ipmi.service in controller 1", "inactive", "Stopping the tripleo_ceilometer_agent_ipmi.service in controller 2", "Stopping the tripleo_ceilometer_agent_ipmi.service in controller 3", "Stopping the tripleo_ceilometer_agent_notification.service in controller 1", "inactive", "Stopping the 
tripleo_ceilometer_agent_notification.service in controller 2", "Stopping the tripleo_ceilometer_agent_notification.service in controller 3", "Stopping the tripleo_ovn_cluster_northd.service in controller 1", "active", "Stopping the tripleo_ovn_cluster_northd.service in controller 2", "Stopping the tripleo_ovn_cluster_northd.service in controller 3", "Stopping the tripleo_ironic_neutron_agent.service in controller 1", "inactive", "Stopping the tripleo_ironic_neutron_agent.service in controller 2", "Stopping the tripleo_ironic_neutron_agent.service in controller 3", "Stopping the tripleo_ironic_api.service in controller 1", "inactive", "Stopping the tripleo_ironic_api.service in controller 2", "Stopping the tripleo_ironic_api.service in controller 3", "Stopping the tripleo_ironic_inspector.service in controller 1", "inactive", "Stopping the tripleo_ironic_inspector.service in controller 2", "Stopping the tripleo_ironic_inspector.service in controller 3", "Stopping the tripleo_ironic_conductor.service in controller 1", "inactive", "Stopping the tripleo_ironic_conductor.service in controller 2", "Stopping the tripleo_ironic_conductor.service in controller 3", "Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 1", "inactive", "Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 2", "Stopping the tripleo_ironic_inspector_dnsmasq.service in controller 3", "Stopping the tripleo_ironic_pxe_http.service in controller 1", "inactive", "Stopping the tripleo_ironic_pxe_http.service in controller 2", "Stopping the tripleo_ironic_pxe_http.service in controller 3", "Stopping the tripleo_ironic_pxe_tftp.service in controller 1", "inactive", "Stopping the tripleo_ironic_pxe_tftp.service in controller 2", "Stopping the tripleo_ironic_pxe_tftp.service in controller 3", "Checking systemd OpenStack services", "OK: Service tripleo_aodh_api.service is not running on controller 1", "ERROR: Service tripleo_aodh_api.service still running on controller 2", "ERROR: Service tripleo_aodh_api.service still running on controller 3", "OK: Service tripleo_aodh_api_cron.service is not running on controller 1", "ERROR: Service tripleo_aodh_api_cron.service still running on controller 2", "ERROR: Service tripleo_aodh_api_cron.service still running on controller 3", "OK: Service tripleo_aodh_evaluator.service is not running on controller 1", "ERROR: Service tripleo_aodh_evaluator.service still running on controller 2", "ERROR: Service tripleo_aodh_evaluator.service still running on controller 3", "OK: Service tripleo_aodh_listener.service is not running on controller 1", "ERROR: Service tripleo_aodh_listener.service still running on controller 2", "ERROR: Service tripleo_aodh_listener.service still running on controller 3", "OK: Service tripleo_aodh_notifier.service is not running on controller 1", "ERROR: Service tripleo_aodh_notifier.service still running on controller 2", "ERROR: Service tripleo_aodh_notifier.service still running on controller 3", "OK: Service tripleo_ceilometer_agent_central.service is not running on controller 1", "ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2", "ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3", "OK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1", "ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2", "ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3", "OK: 
Service tripleo_octavia_api.service is not running on controller 1", "ERROR: Service tripleo_octavia_api.service still running on controller 2", "ERROR: Service tripleo_octavia_api.service still running on controller 3", "OK: Service tripleo_octavia_health_manager.service is not running on controller 1", "ERROR: Service tripleo_octavia_health_manager.service still running on controller 2", "ERROR: Service tripleo_octavia_health_manager.service still running on controller 3", "OK: Service tripleo_octavia_rsyslog.service is not running on controller 1", "ERROR: Service tripleo_octavia_rsyslog.service still running on controller 2", "ERROR: Service tripleo_octavia_rsyslog.service still running on controller 3", "OK: Service tripleo_octavia_driver_agent.service is not running on controller 1", "ERROR: Service tripleo_octavia_driver_agent.service still running on controller 2", "ERROR: Service tripleo_octavia_driver_agent.service still running on controller 3", "OK: Service tripleo_octavia_housekeeping.service is not running on controller 1", "ERROR: Service tripleo_octavia_housekeeping.service still running on controller 2", "ERROR: Service tripleo_octavia_housekeeping.service still running on controller 3", "OK: Service tripleo_octavia_worker.service is not running on controller 1", "ERROR: Service tripleo_octavia_worker.service still running on controller 2", "ERROR: Service tripleo_octavia_worker.service still running on controller 3", "OK: Service tripleo_horizon.service is not running on controller 1", "ERROR: Service tripleo_horizon.service still running on controller 2", "ERROR: Service tripleo_horizon.service still running on controller 3", "OK: Service tripleo_keystone.service is not running on controller 1", "ERROR: Service tripleo_keystone.service still running on controller 2", "ERROR: Service tripleo_keystone.service still running on controller 3", "OK: Service tripleo_barbican_api.service is not running on controller 1", "ERROR: Service tripleo_barbican_api.service still running on controller 2", "ERROR: Service tripleo_barbican_api.service still running on controller 3", "OK: Service tripleo_barbican_worker.service is not running on controller 1", "ERROR: Service tripleo_barbican_worker.service still running on controller 2", "ERROR: Service tripleo_barbican_worker.service still running on controller 3", "OK: Service tripleo_barbican_keystone_listener.service is not running on controller 1", "ERROR: Service tripleo_barbican_keystone_listener.service still running on controller 2", "ERROR: Service tripleo_barbican_keystone_listener.service still running on controller 3", "OK: Service tripleo_cinder_api.service is not running on controller 1", "ERROR: Service tripleo_cinder_api.service still running on controller 2", "ERROR: Service tripleo_cinder_api.service still running on controller 3", "OK: Service tripleo_cinder_api_cron.service is not running on controller 1", "ERROR: Service tripleo_cinder_api_cron.service still running on controller 2", "ERROR: Service tripleo_cinder_api_cron.service still running on controller 3", "OK: Service tripleo_cinder_scheduler.service is not running on controller 1", "ERROR: Service tripleo_cinder_scheduler.service still running on controller 2", "ERROR: Service tripleo_cinder_scheduler.service still running on controller 3", "OK: Service tripleo_cinder_volume.service is not running on controller 1", "ERROR: Service tripleo_cinder_volume.service still running on controller 2", "ERROR: Service tripleo_cinder_volume.service still running on 
controller 3", "OK: Service tripleo_cinder_backup.service is not running on controller 1", "ERROR: Service tripleo_cinder_backup.service still running on controller 2", "ERROR: Service tripleo_cinder_backup.service still running on controller 3", "OK: Service tripleo_collectd.service is not running on controller 1", "ERROR: Service tripleo_collectd.service still running on controller 2", "ERROR: Service tripleo_collectd.service still running on controller 3", "OK: Service tripleo_glance_api.service is not running on controller 1", "ERROR: Service tripleo_glance_api.service still running on controller 2", "ERROR: Service tripleo_glance_api.service still running on controller 3", "OK: Service tripleo_gnocchi_api.service is not running on controller 1", "ERROR: Service tripleo_gnocchi_api.service still running on controller 2", "ERROR: Service tripleo_gnocchi_api.service still running on controller 3", "OK: Service tripleo_gnocchi_metricd.service is not running on controller 1", "ERROR: Service tripleo_gnocchi_metricd.service still running on controller 2", "ERROR: Service tripleo_gnocchi_metricd.service still running on controller 3", "OK: Service tripleo_gnocchi_statsd.service is not running on controller 1", "ERROR: Service tripleo_gnocchi_statsd.service still running on controller 2", "ERROR: Service tripleo_gnocchi_statsd.service still running on controller 3", "OK: Service tripleo_manila_api.service is not running on controller 1", "ERROR: Service tripleo_manila_api.service still running on controller 2", "ERROR: Service tripleo_manila_api.service still running on controller 3", "OK: Service tripleo_manila_api_cron.service is not running on controller 1", "ERROR: Service tripleo_manila_api_cron.service still running on controller 2", "ERROR: Service tripleo_manila_api_cron.service still running on controller 3", "OK: Service tripleo_manila_scheduler.service is not running on controller 1", "ERROR: Service tripleo_manila_scheduler.service still running on controller 2", "ERROR: Service tripleo_manila_scheduler.service still running on controller 3", "OK: Service tripleo_neutron_api.service is not running on controller 1", "ERROR: Service tripleo_neutron_api.service still running on controller 2", "ERROR: Service tripleo_neutron_api.service still running on controller 3", "OK: Service tripleo_placement_api.service is not running on controller 1", "ERROR: Service tripleo_placement_api.service still running on controller 2", "ERROR: Service tripleo_placement_api.service still running on controller 3", "OK: Service tripleo_nova_api_cron.service is not running on controller 1", "ERROR: Service tripleo_nova_api_cron.service still running on controller 2", "ERROR: Service tripleo_nova_api_cron.service still running on controller 3", "OK: Service tripleo_nova_api.service is not running on controller 1", "ERROR: Service tripleo_nova_api.service still running on controller 2", "ERROR: Service tripleo_nova_api.service still running on controller 3", "OK: Service tripleo_nova_conductor.service is not running on controller 1", "ERROR: Service tripleo_nova_conductor.service still running on controller 2", "ERROR: Service tripleo_nova_conductor.service still running on controller 3", "OK: Service tripleo_nova_metadata.service is not running on controller 1", "ERROR: Service tripleo_nova_metadata.service still running on controller 2", "ERROR: Service tripleo_nova_metadata.service still running on controller 3", "OK: Service tripleo_nova_scheduler.service is not running on controller 1", "ERROR: Service 
tripleo_nova_scheduler.service still running on controller 2", "ERROR: Service tripleo_nova_scheduler.service still running on controller 3", "OK: Service tripleo_nova_vnc_proxy.service is not running on controller 1", "ERROR: Service tripleo_nova_vnc_proxy.service still running on controller 2", "ERROR: Service tripleo_nova_vnc_proxy.service still running on controller 3", "OK: Service tripleo_aodh_api.service is not running on controller 1", "ERROR: Service tripleo_aodh_api.service still running on controller 2", "ERROR: Service tripleo_aodh_api.service still running on controller 3", "OK: Service tripleo_aodh_api_cron.service is not running on controller 1", "ERROR: Service tripleo_aodh_api_cron.service still running on controller 2", "ERROR: Service tripleo_aodh_api_cron.service still running on controller 3", "OK: Service tripleo_aodh_evaluator.service is not running on controller 1", "ERROR: Service tripleo_aodh_evaluator.service still running on controller 2", "ERROR: Service tripleo_aodh_evaluator.service still running on controller 3", "OK: Service tripleo_aodh_listener.service is not running on controller 1", "ERROR: Service tripleo_aodh_listener.service still running on controller 2", "ERROR: Service tripleo_aodh_listener.service still running on controller 3", "OK: Service tripleo_aodh_notifier.service is not running on controller 1", "ERROR: Service tripleo_aodh_notifier.service still running on controller 2", "ERROR: Service tripleo_aodh_notifier.service still running on controller 3", "OK: Service tripleo_ceilometer_agent_central.service is not running on controller 1", "ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 2", "ERROR: Service tripleo_ceilometer_agent_central.service still running on controller 3", "OK: Service tripleo_ceilometer_agent_compute.service is not running on controller 1", "ERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 2", "ERROR: Service tripleo_ceilometer_agent_compute.service still running on controller 3", "OK: Service tripleo_ceilometer_agent_ipmi.service is not running on controller 1", "ERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 2", "ERROR: Service tripleo_ceilometer_agent_ipmi.service still running on controller 3", "OK: Service tripleo_ceilometer_agent_notification.service is not running on controller 1", "ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 2", "ERROR: Service tripleo_ceilometer_agent_notification.service still running on controller 3", "OK: Service tripleo_ovn_cluster_northd.service is not running on controller 1", "ERROR: Service tripleo_ovn_cluster_northd.service still running on controller 2", "ERROR: Service tripleo_ovn_cluster_northd.service still running on controller 3", "OK: Service tripleo_ironic_neutron_agent.service is not running on controller 1", "ERROR: Service tripleo_ironic_neutron_agent.service still running on controller 2", "ERROR: Service tripleo_ironic_neutron_agent.service still running on controller 3", "OK: Service tripleo_ironic_api.service is not running on controller 1", "ERROR: Service tripleo_ironic_api.service still running on controller 2", "ERROR: Service tripleo_ironic_api.service still running on controller 3", "OK: Service tripleo_ironic_inspector.service is not running on controller 1", "ERROR: Service tripleo_ironic_inspector.service still running on controller 2", "ERROR: Service tripleo_ironic_inspector.service still running on controller 3", 
"OK: Service tripleo_ironic_conductor.service is not running on controller 1", "ERROR: Service tripleo_ironic_conductor.service still running on controller 2", "ERROR: Service tripleo_ironic_conductor.service still running on controller 3", "OK: Service tripleo_ironic_inspector_dnsmasq.service is not running on controller 1", "ERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 2", "ERROR: Service tripleo_ironic_inspector_dnsmasq.service still running on controller 3", "OK: Service tripleo_ironic_pxe_http.service is not running on controller 1", "ERROR: Service tripleo_ironic_pxe_http.service still running on controller 2", "ERROR: Service tripleo_ironic_pxe_http.service still running on controller 3", "OK: Service tripleo_ironic_pxe_tftp.service is not running on controller 1", "ERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 2", "ERROR: Service tripleo_ironic_pxe_tftp.service still running on controller 3", "Stopping pacemaker OpenStack services", "Using controller 1 to run pacemaker commands", "Stopping openstack-cinder-volume", "Stopping openstack-cinder-backup", "Stopping openstack-manila-share", "Checking pacemaker OpenStack services", "Using controller 1 to run pacemaker commands", "OK: Service openstack-cinder-volume is stopped", " * openstack-cinder-backup-podman-0\t(ocf:heartbeat:podman):\t Started standalone (disabled)", "ERROR: Service openstack-cinder-backup is started", " * openstack-manila-share-podman-0\t(ocf:heartbeat:podman):\t Started standalone (disabled)", "ERROR: Service openstack-manila-share is started"]} TASK [execute alternative tasks when source env is OSPdO] ********************** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [mariadb_copy : start an adoption mariadb helper pod] ********************* changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\n\noc apply -f - < {"attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait --for condition=Ready pod/mariadb-copy-data --timeout=10s\n", "delta": "0:00:02.543734", "end": "2025-10-06 15:05:52.775273", "msg": "", "rc": 0, "start": "2025-10-06 15:05:50.231539", "stderr": "+ oc wait --for condition=Ready pod/mariadb-copy-data --timeout=10s", "stderr_lines": ["+ oc wait --for condition=Ready pod/mariadb-copy-data --timeout=10s"], "stdout": "pod/mariadb-copy-data condition met", "stdout_lines": ["pod/mariadb-copy-data condition 
met"]} TASK [mariadb_copy : check that the Galera database cluster(s) members are online and synced, for all cells] *** changed: [localhost] => {"changed": true, "cmd": "\nset -euxo pipefail\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A SOURCE_GALERA_MEMBERS_DEFAULT\nSOURCE_GALERA_MEMBERS_DEFAULT=(\n[\"standalone\"]=\"172.17.0.100\"\n\n)\n\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nfor CELL in $(echo $CELLS); do\n MEMBERS=SOURCE_GALERA_MEMBERS_$(echo ${CELL}|tr '[:lower:]' '[:upper:]')[@]\n for i in \"${!MEMBERS}\"; do\n echo \"Checking for the database node $i WSREP status Synced\"\n oc rsh mariadb-copy-data mysql -h \"$i\" -uroot -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\" -e \"show global status like 'wsrep_local_state_comment'\" | grep -qE \"\\bSynced\\b\"\n done\ndone\n", "delta": "0:00:00.220730", "end": "2025-10-06 15:05:53.284848", "msg": "", "rc": 0, "start": "2025-10-06 15:05:53.064118", "stderr": "+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A SOURCE_GALERA_MEMBERS_DEFAULT\n+ SOURCE_GALERA_MEMBERS_DEFAULT=([\"standalone\"]=\"172.17.0.100\")\n+ set -euxo pipefail\n+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ echo default\n++ tr '[:lower:]' '[:upper:]'\n+ MEMBERS='SOURCE_GALERA_MEMBERS_DEFAULT[@]'\n+ for i in \"${!MEMBERS}\"\n+ echo 'Checking for the database node 172.17.0.100 WSREP status Synced'\n+ oc rsh mariadb-copy-data 
mysql -h 172.17.0.100 -uroot -pCjEVN5fsDI -e 'show global status like '\\''wsrep_local_state_comment'\\'''\n+ grep -qE '\\bSynced\\b'", "stderr_lines": ["+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A SOURCE_GALERA_MEMBERS_DEFAULT", "+ SOURCE_GALERA_MEMBERS_DEFAULT=([\"standalone\"]=\"172.17.0.100\")", "+ set -euxo pipefail", "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ echo default", "++ tr '[:lower:]' '[:upper:]'", "+ MEMBERS='SOURCE_GALERA_MEMBERS_DEFAULT[@]'", "+ for i in \"${!MEMBERS}\"", "+ echo 'Checking for the database node 172.17.0.100 WSREP status Synced'", "+ oc rsh mariadb-copy-data mysql -h 172.17.0.100 -uroot -pCjEVN5fsDI -e 'show global status like '\\''wsrep_local_state_comment'\\'''", "+ grep -qE '\\bSynced\\b'"], "stdout": "Checking for the database node 172.17.0.100 WSREP status Synced", "stdout_lines": ["Checking for the database node 172.17.0.100 WSREP status Synced"]} TASK [mariadb_copy : Get the count of not-OK source databases] ***************** changed: [localhost] => {"changed": true, "cmd": "for CELL in $(echo $CELLS); do\nset +u\n . 
~/.source_cloud_exported_variables_$CELL\nset -u\ndone\ntest -z \"$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\" || [ \"x$PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\" = \"x \" ] && echo \"OK\" || echo \"CHECK FAILED\"\n", "delta": "0:00:00.005840", "end": "2025-10-06 15:05:53.543329", "failed_when_result": false, "msg": "", "rc": 0, "start": "2025-10-06 15:05:53.537489", "stderr": "", "stderr_lines": [], "stdout": "OK", "stdout_lines": ["OK"]} TASK [mariadb_copy : test connection to podified DBs (show databases)] ********* changed: [localhost] => {"changed": true, "cmd": "#!/bin/bash\nset -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\nCHARACTER_SET=utf8\nCOLLATION=utf8_general_ci\n\nset -euxo pipefail\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A PODIFIED_DB_ROOT_PASSWORD\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n PODIFIED_DB_ROOT_PASSWORD[$CELL]=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d)\ndone\n\ndeclare -A PODIFIED_MARIADB_IP\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n if [ \"$CELL\" = \"super\" ]; then\n PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector \"mariadb/name=openstack\" -ojsonpath='{.items[0].spec.clusterIP}')\n else\n PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector \"mariadb/name=openstack-$CELL\" -ojsonpath='{.items[0].spec.clusterIP}')\n fi\ndone\n\n\n# Test the connection to the control plane \"upcall\" and cells' databases\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n oc run mariadb-client --image $MARIADB_IMAGE -i --rm --restart=Never -- \\\n mysql -rsh \"${PODIFIED_MARIADB_IP[$CELL]}\" -uroot -p\"${PODIFIED_DB_ROOT_PASSWORD[$CELL]}\" -e 'SHOW databases;'\ndone\n", "delta": "0:00:06.269000", "end": "2025-10-06 15:06:00.096861", "msg": "", "rc": 0, "start": "2025-10-06 15:05:53.827861", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ CHARACTER_SET=utf8\n+ COLLATION=utf8_general_ci\n+ set -euxo pipefail\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A PODIFIED_DB_ROOT_PASSWORD\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n++ oc get -o json secret/osp-secret\n++ jq -r .data.DbRootPassword\n++ base64 -d\n+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n++ oc get -o json secret/osp-secret\n++ jq -r .data.DbRootPassword\n++ base64 -d\n+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678\n+ declare -A PODIFIED_MARIADB_IP\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ '[' super = super ']'\n++ oc get svc --selector mariadb/name=openstack '-ojsonpath={.items[0].spec.clusterIP}'\n+ PODIFIED_MARIADB_IP[$CELL]=10.217.4.91\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ '[' cell1 = super ']'\n++ oc get svc --selector mariadb/name=openstack-cell1 '-ojsonpath={.items[0].spec.clusterIP}'\n+ PODIFIED_MARIADB_IP[$CELL]=10.217.5.113\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ oc run mariadb-client --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 10.217.4.91 -uroot -p12345678 -e 'SHOW databases;'\nWarning: would violate PodSecurity 
\"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ oc run mariadb-client --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 10.217.5.113 -uroot -p12345678 -e 'SHOW databases;'\nWarning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ CHARACTER_SET=utf8", "+ COLLATION=utf8_general_ci", "+ set -euxo pipefail", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A PODIFIED_DB_ROOT_PASSWORD", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "++ oc get -o json secret/osp-secret", "++ jq -r .data.DbRootPassword", "++ base64 -d", "+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "++ oc get -o json secret/osp-secret", "++ jq -r .data.DbRootPassword", "++ base64 -d", "+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678", "+ declare -A PODIFIED_MARIADB_IP", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ '[' super = super ']'", "++ oc get svc --selector mariadb/name=openstack '-ojsonpath={.items[0].spec.clusterIP}'", "+ PODIFIED_MARIADB_IP[$CELL]=10.217.4.91", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ '[' cell1 = super ']'", "++ oc get svc --selector mariadb/name=openstack-cell1 '-ojsonpath={.items[0].spec.clusterIP}'", "+ PODIFIED_MARIADB_IP[$CELL]=10.217.5.113", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ oc run mariadb-client --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never -- mysql -rsh 10.217.4.91 -uroot -p12345678 -e 'SHOW databases;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ oc run mariadb-client --image quay.io/podified-antelope-centos9/openstack-mariadb:current-podified -i --rm --restart=Never 
-- mysql -rsh 10.217.5.113 -uroot -p12345678 -e 'SHOW databases;'", "Warning: would violate PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"mariadb-client\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"mariadb-client\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"mariadb-client\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"mariadb-client\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")"], "stdout": "information_schema\nmysql\nperformance_schema\npod \"mariadb-client\" deleted\ninformation_schema\nmysql\nperformance_schema\npod \"mariadb-client\" deleted", "stdout_lines": ["information_schema", "mysql", "performance_schema", "pod \"mariadb-client\" deleted", "information_schema", "mysql", "performance_schema", "pod \"mariadb-client\" deleted"]} TASK [mariadb_copy : dump databases] ******************************************* changed: [localhost] => {"changed": true, "cmd": "#!/bin/bash\n\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' '\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\n\n# Create a dump of the original databases\n# Note Filter the information and performance schema tables\n# Gnocchi is no longer used as a metric store, skip dumping gnocchi database as well\n# Migrating Aodh alarms from previous release is not supported, hence skip aodh database\nfor CELL in $(echo $CELLS); do\n oc rsh mariadb-copy-data << EOF\n mysql -h\"${SOURCE_MARIADB_IP[$CELL]}\" -uroot -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\" \\\n -N -e \"show databases\" | grep -E -v \"schema|mysql|gnocchi|aodh\" | \\\n while read dbname; do\n echo \"Dumping $CELL cell \\${dbname}\";\n mysqldump -h\"${SOURCE_MARIADB_IP[$CELL]}\" -uroot -p\"${SOURCE_DB_ROOT_PASSWORD[$CELL]}\" \\\n --single-transaction --complete-insert --skip-lock-tables --lock-tables=0 \\\n \"\\${dbname}\" > /backup/\"${CELL}.\\${dbname}\".sql;\n done\nEOF\ndone\n", "delta": "0:00:04.398352", "end": "2025-10-06 15:06:04.874408", "msg": "", "rc": 0, "start": "2025-10-06 15:06:00.476056", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ 
MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ oc rsh mariadb-copy-data", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ oc rsh mariadb-copy-data"], "stdout": "Dumping default cell cinder\nDumping default cell glance\nDumping default cell heat\nDumping default cell keystone\nDumping default cell manila\nDumping default cell nova\nDumping default cell nova_api\nDumping default cell nova_cell0\nDumping default cell octavia\nDumping default cell octavia_persistence\nDumping default cell ovs_neutron\nDumping default cell placement", "stdout_lines": ["Dumping default cell cinder", "Dumping default cell glance", "Dumping default cell heat", "Dumping default cell keystone", "Dumping default cell manila", "Dumping default cell nova", "Dumping default cell nova_api", "Dumping default cell nova_cell0", "Dumping default cell octavia", "Dumping default cell octavia_persistence", "Dumping default cell ovs_neutron", "Dumping default cell placement"]} TASK [mariadb_copy : restore databases] **************************************** changed: [localhost] => {"changed": true, "cmd": "#!/bin/bash\nset -euxo pipefail\n\n\nPASSWORD_FILE=\"$HOME/overcloud-passwords.yaml\"\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A TRIPLEO_PASSWORDS\nfor CELL in $(echo $CELLS); do\n TRIPLEO_PASSWORDS[$CELL]=\"$PASSWORD_FILE\"\ndone\n\nRUN_OVERRIDES=' 
'\nMARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\nMARIADB_RUN_OVERRIDES=$MARIADB_CLIENT_ANNOTATIONS\n\nOSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\ndeclare -A SOURCE_DB_ROOT_PASSWORD\nfor CELL in $(echo $CELLS); do\n SOURCE_DB_ROOT_PASSWORD[$CELL]=$(cat ${TRIPLEO_PASSWORDS[$CELL]} | grep ' MysqlRootPassword:' | awk -F ': ' '{ print $2; }')\ndone\n\ndeclare -A SOURCE_MARIADB_IP\nSOURCE_MARIADB_IP[default]=172.17.0.100\n\nset -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\nCHARACTER_SET=utf8\nCOLLATION=utf8_general_ci\n\nset -euxo pipefail\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A PODIFIED_DB_ROOT_PASSWORD\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n PODIFIED_DB_ROOT_PASSWORD[$CELL]=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d)\ndone\n\ndeclare -A PODIFIED_MARIADB_IP\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n if [ \"$CELL\" = \"super\" ]; then\n PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector \"mariadb/name=openstack\" -ojsonpath='{.items[0].spec.clusterIP}')\n else\n PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector \"mariadb/name=openstack-$CELL\" -ojsonpath='{.items[0].spec.clusterIP}')\n fi\ndone\n\n\n# Restore the databases from .sql files into the control plane MariaDB\n\nfor CELL in $(echo $CELLS); do\n RCELL=$CELL\n [ \"$CELL\" = \"default\" ] && RCELL=$DEFAULT_CELL_NAME\n oc rsh mariadb-copy-data << EOF\n declare -A db_name_map # <1>\n db_name_map['nova']=\"nova_$RCELL\"\n db_name_map['ovs_neutron']='neutron'\n db_name_map['ironic-inspector']='ironic_inspector'\n declare -A db_cell_map # <2>\n db_cell_map['nova']=\"nova_$DEFAULT_CELL_NAME\"\n db_cell_map[\"nova_$RCELL\"]=\"nova_$RCELL\" # <3>\n declare -A db_server_map # <4>\n db_server_map['default']=${PODIFIED_MARIADB_IP['super']}\n db_server_map[\"nova\"]=${PODIFIED_MARIADB_IP[$DEFAULT_CELL_NAME]}\n db_server_map[\"nova_$RCELL\"]=${PODIFIED_MARIADB_IP[$RCELL]}\n declare -A db_server_password_map # <5>\n db_server_password_map['default']=${PODIFIED_DB_ROOT_PASSWORD['super']}\n db_server_password_map[\"nova\"]=${PODIFIED_DB_ROOT_PASSWORD[$DEFAULT_CELL_NAME]}\n db_server_password_map[\"nova_$RCELL\"]=${PODIFIED_DB_ROOT_PASSWORD[$RCELL]}\n cd /backup\n for db_file in \\$(ls ${CELL}.*.sql); do\n db_name=\\$(echo \\${db_file} | awk -F'.' '{ print \\$2; }')\n [[ \"$CELL\" != \"default\" && ! -v \"db_cell_map[\\${db_name}]\" ]] && continue\n if [[ \"$CELL\" == \"default\" && -v \"db_cell_map[\\${db_name}]\" ]] ; then\n target=$DEFAULT_CELL_NAME\n elif [[ \"$CELL\" == \"default\" && ! 
-v \"db_cell_map[\\${db_name}]\" ]] ; then\n target=super\n else\n target=$RCELL\n fi # <6>\n renamed_db_file=\"\\${target}_new.\\${db_name}.sql\"\n mv -f \\${db_file} \\${renamed_db_file}\n if [[ -v \"db_name_map[\\${db_name}]\" ]]; then\n echo \"renaming $CELL cell \\${db_name} to \\$target \\${db_name_map[\\${db_name}]}\"\n db_name=\\${db_name_map[\\${db_name}]}\n fi\n db_server=\\${db_server_map[\"default\"]}\n if [[ -v \"db_server_map[\\${db_name}]\" ]]; then\n db_server=\\${db_server_map[\\${db_name}]}\n fi\n db_password=\\${db_server_password_map['default']}\n if [[ -v \"db_server_password_map[\\${db_name}]\" ]]; then\n db_password=\\${db_server_password_map[\\${db_name}]}\n fi\n echo \"creating $CELL cell \\${db_name} in \\$target \\${db_server}\"\n mysql -h\"\\${db_server}\" -uroot \"-p\\${db_password}\" -e \\\n \"CREATE DATABASE IF NOT EXISTS \\${db_name} DEFAULT \\\n CHARACTER SET ${CHARACTER_SET} DEFAULT COLLATE ${COLLATION};\"\n echo \"importing $CELL cell \\${db_name} into \\$target \\${db_server} from \\${renamed_db_file}\"\n mysql -h \"\\${db_server}\" -uroot \"-p\\${db_password}\" \"\\${db_name}\" < \"\\${renamed_db_file}\"\n done\n if [ \"$CELL\" = \"default\" ] ; then\n mysql -h \"\\${db_server_map['default']}\" -uroot -p\"\\${db_server_password_map['default']}\" -e \\\n \"update nova_api.cell_mappings set name='$DEFAULT_CELL_NAME' where name='default';\"\n fi\n mysql -h \"\\${db_server_map[\"nova_$RCELL\"]}\" -uroot -p\"\\${db_server_password_map[\"nova_$RCELL\"]}\" -e \\\n \"delete from nova_${RCELL}.services where host not like '%nova_${RCELL}-%' and services.binary != 'nova-compute';\"\nEOF\ndone\n\n# <1> Defines which common databases to rename when importing them.\n# <2> Defines which cells databases to import, and how to rename them, if needed.\n# <3> Omits importing special `cell0` databases of the cells, as its contents cannot be consolidated during adoption.\n# <4> Defines which databases to import into which servers, usually dedicated for cells.\n# <5> Defines the root passwords map for database servers. 
You can only use the same password for now.\n# <6> Assigns which databases to import into which hosts when extracting databases from the `default` cell.\n", "delta": "0:00:21.179162", "end": "2025-10-06 15:06:26.364276", "msg": "", "rc": 0, "start": "2025-10-06 15:06:05.185114", "stderr": "+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A TRIPLEO_PASSWORDS\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml\n+ RUN_OVERRIDES=' '\n+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi\n+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ declare -A SOURCE_DB_ROOT_PASSWORD\n++ echo default\n+ for CELL in $(echo $CELLS)\n++ cat /home/zuul/overcloud-passwords.yaml\n++ grep ' MysqlRootPassword:'\n++ awk -F ': ' '{ print $2; }'\n+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI\n+ declare -A SOURCE_MARIADB_IP\n+ SOURCE_MARIADB_IP[default]=172.17.0.100\n+ set -euxo pipefail\n+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ CHARACTER_SET=utf8\n+ COLLATION=utf8_general_ci\n+ set -euxo pipefail\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A PODIFIED_DB_ROOT_PASSWORD\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n++ oc get -o json secret/osp-secret\n++ jq -r .data.DbRootPassword\n++ base64 -d\n+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n++ oc get -o json secret/osp-secret\n++ jq -r .data.DbRootPassword\n++ base64 -d\n+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678\n+ declare -A PODIFIED_MARIADB_IP\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ '[' super = super ']'\n++ oc get svc --selector mariadb/name=openstack '-ojsonpath={.items[0].spec.clusterIP}'\n+ PODIFIED_MARIADB_IP[$CELL]=10.217.4.91\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ '[' cell1 = super ']'\n++ oc get svc --selector mariadb/name=openstack-cell1 '-ojsonpath={.items[0].spec.clusterIP}'\n+ PODIFIED_MARIADB_IP[$CELL]=10.217.5.113\n++ echo default\n+ for CELL in $(echo $CELLS)\n+ RCELL=default\n+ '[' default = default ']'\n+ RCELL=cell1\n+ oc rsh mariadb-copy-data", "stderr_lines": ["+ PASSWORD_FILE=/home/zuul/overcloud-passwords.yaml", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A TRIPLEO_PASSWORDS", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ TRIPLEO_PASSWORDS[$CELL]=/home/zuul/overcloud-passwords.yaml", "+ RUN_OVERRIDES=' '", "+ MARIADB_CLIENT_ANNOTATIONS=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ MARIADB_RUN_OVERRIDES=--annotations=k8s.v1.cni.cncf.io/networks=internalapi", "+ OSPDO_MARIADB_CLIENT_ANNOTATIONS='[{\"name\": \"internalapi-static\",\"ips\": [\"172.17.0.99/24\"]}]'", "+ CONTROLLER1_SSH='ssh -i 
/home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ declare -A SOURCE_DB_ROOT_PASSWORD", "++ echo default", "+ for CELL in $(echo $CELLS)", "++ cat /home/zuul/overcloud-passwords.yaml", "++ grep ' MysqlRootPassword:'", "++ awk -F ': ' '{ print $2; }'", "+ SOURCE_DB_ROOT_PASSWORD[$CELL]=CjEVN5fsDI", "+ declare -A SOURCE_MARIADB_IP", "+ SOURCE_MARIADB_IP[default]=172.17.0.100", "+ set -euxo pipefail", "+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ CHARACTER_SET=utf8", "+ COLLATION=utf8_general_ci", "+ set -euxo pipefail", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A PODIFIED_DB_ROOT_PASSWORD", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "++ oc get -o json secret/osp-secret", "++ jq -r .data.DbRootPassword", "++ base64 -d", "+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "++ oc get -o json secret/osp-secret", "++ jq -r .data.DbRootPassword", "++ base64 -d", "+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678", "+ declare -A PODIFIED_MARIADB_IP", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ '[' super = super ']'", "++ oc get svc --selector mariadb/name=openstack '-ojsonpath={.items[0].spec.clusterIP}'", "+ PODIFIED_MARIADB_IP[$CELL]=10.217.4.91", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ '[' cell1 = super ']'", "++ oc get svc --selector mariadb/name=openstack-cell1 '-ojsonpath={.items[0].spec.clusterIP}'", "+ PODIFIED_MARIADB_IP[$CELL]=10.217.5.113", "++ echo default", "+ for CELL in $(echo $CELLS)", "+ RCELL=default", "+ '[' default = default ']'", "+ RCELL=cell1", "+ oc rsh mariadb-copy-data"], "stdout": "creating default cell cinder in super 10.217.4.91\nimporting default cell cinder into super 10.217.4.91 from super_new.cinder.sql\ncreating default cell glance in super 10.217.4.91\nimporting default cell glance into super 10.217.4.91 from super_new.glance.sql\ncreating default cell heat in super 10.217.4.91\nimporting default cell heat into super 10.217.4.91 from super_new.heat.sql\ncreating default cell keystone in super 10.217.4.91\nimporting default cell keystone into super 10.217.4.91 from super_new.keystone.sql\ncreating default cell manila in super 10.217.4.91\nimporting default cell manila into super 10.217.4.91 from super_new.manila.sql\ncreating default cell nova_api in super 10.217.4.91\nimporting default cell nova_api into super 10.217.4.91 from super_new.nova_api.sql\ncreating default cell nova_cell0 in super 10.217.4.91\nimporting default cell nova_cell0 into super 10.217.4.91 from super_new.nova_cell0.sql\nrenaming default cell nova to cell1 nova_cell1\ncreating default cell nova_cell1 in cell1 10.217.5.113\nimporting default cell nova_cell1 into cell1 10.217.5.113 from cell1_new.nova.sql\ncreating default cell octavia_persistence in super 10.217.4.91\nimporting default cell octavia_persistence into super 10.217.4.91 from super_new.octavia_persistence.sql\ncreating default cell octavia in super 10.217.4.91\nimporting default cell octavia into super 10.217.4.91 from super_new.octavia.sql\nrenaming default cell ovs_neutron to super neutron\ncreating default cell neutron in super 10.217.4.91\nimporting default cell neutron into super 10.217.4.91 from super_new.ovs_neutron.sql\ncreating default cell placement in super 
10.217.4.91\nimporting default cell placement into super 10.217.4.91 from super_new.placement.sql", "stdout_lines": ["creating default cell cinder in super 10.217.4.91", "importing default cell cinder into super 10.217.4.91 from super_new.cinder.sql", "creating default cell glance in super 10.217.4.91", "importing default cell glance into super 10.217.4.91 from super_new.glance.sql", "creating default cell heat in super 10.217.4.91", "importing default cell heat into super 10.217.4.91 from super_new.heat.sql", "creating default cell keystone in super 10.217.4.91", "importing default cell keystone into super 10.217.4.91 from super_new.keystone.sql", "creating default cell manila in super 10.217.4.91", "importing default cell manila into super 10.217.4.91 from super_new.manila.sql", "creating default cell nova_api in super 10.217.4.91", "importing default cell nova_api into super 10.217.4.91 from super_new.nova_api.sql", "creating default cell nova_cell0 in super 10.217.4.91", "importing default cell nova_cell0 into super 10.217.4.91 from super_new.nova_cell0.sql", "renaming default cell nova to cell1 nova_cell1", "creating default cell nova_cell1 in cell1 10.217.5.113", "importing default cell nova_cell1 into cell1 10.217.5.113 from cell1_new.nova.sql", "creating default cell octavia_persistence in super 10.217.4.91", "importing default cell octavia_persistence into super 10.217.4.91 from super_new.octavia_persistence.sql", "creating default cell octavia in super 10.217.4.91", "importing default cell octavia into super 10.217.4.91 from super_new.octavia.sql", "renaming default cell ovs_neutron to super neutron", "creating default cell neutron in super 10.217.4.91", "importing default cell neutron into super 10.217.4.91 from super_new.ovs_neutron.sql", "creating default cell placement in super 10.217.4.91", "importing default cell placement into super 10.217.4.91 from super_new.placement.sql"]} TASK [mariadb_copy : Verify MariaDB data imported] ***************************** included: /home/zuul/src/github.com/openstack-k8s-operators/data-plane-adoption/tests/roles/mariadb_copy/tasks/mariadb_verify.yaml for localhost TASK [execute alternative tasks when source env is ODPdO] ********************** skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [mariadb_copy : MariaDB checks] ******************************************* changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nMARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\nCHARACTER_SET=utf8\nCOLLATION=utf8_general_ci\n\nset -euxo pipefail\n\nCELLS=\"default\"\nDEFAULT_CELL_NAME=cell1\nRENAMED_CELLS=\"cell1\"\n\n\ndeclare -A PODIFIED_DB_ROOT_PASSWORD\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n PODIFIED_DB_ROOT_PASSWORD[$CELL]=$(oc get -o json secret/osp-secret | jq -r .data.DbRootPassword | base64 -d)\ndone\n\ndeclare -A PODIFIED_MARIADB_IP\nfor CELL in $(echo \"super $RENAMED_CELLS\"); do\n if [ \"$CELL\" = \"super\" ]; then\n PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector \"mariadb/name=openstack\" -ojsonpath='{.items[0].spec.clusterIP}')\n else\n PODIFIED_MARIADB_IP[$CELL]=$(oc get svc --selector \"mariadb/name=openstack-$CELL\" -ojsonpath='{.items[0].spec.clusterIP}')\n fi\ndone\n\n# Check that the databases were imported correctly\n# use 'oc exec' and 'mysql -rs' to maintain 
formatting\n\nset +u\n. ~/.source_cloud_exported_variables_default\nset -u\ndbs=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p\"${PODIFIED_DB_ROOT_PASSWORD['super']}\" -e 'SHOW databases;')\necho $dbs | grep -Eq '\\bkeystone\\b' && echo \"OK\" || echo \"CHECK FAILED\"\necho $dbs | grep -Eq '\\bneutron\\b' && echo \"OK\" || echo \"CHECK FAILED\"\necho \"${PULL_OPENSTACK_CONFIGURATION_DATABASES[@]}\" | grep -Eq '\\bovs_neutron\\b' && echo \"OK\" || echo \"CHECK FAILED\" # <1>\nnovadb_mapped_cells=$(oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p\"${PODIFIED_DB_ROOT_PASSWORD['super']}\" \\\n nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;') # <2>\nuuidf='\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,}'\ndefault=$(printf \"%s\\n\" \"$PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS\" | sed -rn \"s/^($uuidf)\\s+default\\b.*$/\\1/p\")\ndifference=$(diff -ZNua \\\n <(printf \"%s\\n\" \"$PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS\") \\\n <(printf \"%s\\n\" \"$novadb_mapped_cells\")) || true\nif [ \"$DEFAULT_CELL_NAME\" != \"default\" ]; then\n printf \"%s\\n\" \"$difference\" | grep -qE \"^\\-$default\\s+default\\b\" && echo \"OK\" || echo \"CHECK FAILED\"\n printf \"%s\\n\" \"$difference\" | grep -qE \"^\\+$default\\s+$DEFAULT_CELL_NAME\\b\" && echo \"OK\" || echo \"CHECK FAILED\"\n [ $(grep -E \"^[-\\+]$uuidf\" <<<\"$difference\" | wc -l) -eq 2 ] && echo \"OK\" || echo \"CHECK FAILED\"\nelse\n [ \"x$difference\" = \"x\" ] && echo \"OK\" || echo \"CHECK FAILED\"\nfi\nfor CELL in $(echo $RENAMED_CELLS); do # <3>\n RCELL=$CELL\n [ \"$CELL\" = \"$DEFAULT_CELL_NAME\" ] && RCELL=default\n set +u\n . ~/.source_cloud_exported_variables_$RCELL\n set -u\n c1dbs=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$CELL]} -e 'SHOW databases;') # <4>\n echo $c1dbs | grep -Eq \"\\bnova_${CELL}\\b\" && echo \"OK\" || echo \"CHECK FAILED\"\n novadb_svc_records=$(oc exec openstack-$CELL-galera-0 -c galera -- mysql -rs -uroot -p${PODIFIED_DB_ROOT_PASSWORD[$CELL]} \\\n nova_$CELL -e \"select host from services where services.binary='nova-compute' and deleted=0 order by host asc;\")\n diff -Z <(echo \"x$novadb_svc_records\") <(echo \"x${PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[@]}\") && echo \"OK\" || echo \"CHECK FAILED\" # <5>\ndone\n\n# <1> Ensures that the {networking_first_ref} database is renamed from `ovs_neutron`.\n# <2> Ensures that the `default` cell is renamed to `$DEFAULT_CELL_NAME`, and the cell UUIDs are retained.\n# <3> Ensures that the registered Compute services names have not changed.\n# <4> Ensures {compute_service} cells databases are extracted to separate database servers, and renamed from `nova` to `nova_cell`.\n# <5> Ensures that the registered {compute_service} name has not changed.\n", "delta": "0:00:01.507185", "end": "2025-10-06 15:06:28.271503", "failed_when_result": false, "msg": "", "rc": 0, "start": "2025-10-06 15:06:26.764318", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ CHARACTER_SET=utf8\n+ COLLATION=utf8_general_ci\n+ set -euxo pipefail\n+ CELLS=default\n+ DEFAULT_CELL_NAME=cell1\n+ RENAMED_CELLS=cell1\n+ declare -A PODIFIED_DB_ROOT_PASSWORD\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n++ oc get -o json secret/osp-secret\n++ jq -r .data.DbRootPassword\n++ base64 
-d\n+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n++ oc get -o json secret/osp-secret\n++ jq -r .data.DbRootPassword\n++ base64 -d\n+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678\n+ declare -A PODIFIED_MARIADB_IP\n++ echo 'super cell1'\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ '[' super = super ']'\n++ oc get svc --selector mariadb/name=openstack '-ojsonpath={.items[0].spec.clusterIP}'\n+ PODIFIED_MARIADB_IP[$CELL]=10.217.4.91\n+ for CELL in $(echo \"super $RENAMED_CELLS\")\n+ '[' cell1 = super ']'\n++ oc get svc --selector mariadb/name=openstack-cell1 '-ojsonpath={.items[0].spec.clusterIP}'\n+ PODIFIED_MARIADB_IP[$CELL]=10.217.5.113\n+ set +u\n+ . /home/zuul/.source_cloud_exported_variables_default\n++ unset PULL_OPENSTACK_CONFIGURATION_DATABASES\n++ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n++ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n++ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\n++ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n++ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n++ PULL_OPENSTACK_CONFIGURATION_DATABASES[default]='aodh\ncinder\nglance\ngnocchi\nheat\ninformation_schema\nkeystone\nmanila\nmysql\nnova\nnova_api\nnova_cell0\noctavia\noctavia_persistence\novs_neutron\nperformance_schema\nplacement'\n++ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[default]=\n++ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[default]=standalone.ooo.test\n++ '[' default = default ']'\n++ PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n++ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| Name | UUID | Transport URL | Database Connection | Disabled |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r'\n+ set -u\n++ oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p12345678 
-e 'SHOW databases;'\n+ dbs='cinder\nglance\nheat\ninformation_schema\nkeystone\nmanila\nmysql\nneutron\nnova_api\nnova_cell0\noctavia\noctavia_persistence\nperformance_schema\nplacement'\n+ echo cinder glance heat information_schema keystone manila mysql neutron nova_api nova_cell0 octavia octavia_persistence performance_schema placement\n+ grep -Eq '\\bkeystone\\b'\n+ echo OK\n+ echo cinder glance heat information_schema keystone manila mysql neutron nova_api nova_cell0 octavia octavia_persistence performance_schema placement\n+ grep -Eq '\\bneutron\\b'\n+ echo OK\n+ echo 'aodh\ncinder\nglance\ngnocchi\nheat\ninformation_schema\nkeystone\nmanila\nmysql\nnova\nnova_api\nnova_cell0\noctavia\noctavia_persistence\novs_neutron\nperformance_schema\nplacement'\n+ grep -Eq '\\bovs_neutron\\b'\n+ echo OK\n++ oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p12345678 nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'\n+ novadb_mapped_cells='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ uuidf='\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,}'\n++ printf '%s\\n' '00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n++ sed -rn 's/^(\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,})\\s+default\\b.*$/\\1/p'\n+ default=8acf629b-94c0-4fdc-bf28-41015a9a8dc4\n++ diff -ZNua /dev/fd/63 /dev/fd/62\n+++ printf '%s\\n' '00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+++ printf '%s\\n' '00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ difference='--- /dev/fd/63\t2025-10-06 15:06:27.797625349 +0000\n+++ /dev/fd/62\t2025-10-06 15:06:27.797625349 +0000\n@@ -1,2 +1,2 @@\n 
00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ true\n+ '[' cell1 '!=' default ']'\n+ printf '%s\\n' '--- /dev/fd/63\t2025-10-06 15:06:27.797625349 +0000\n+++ /dev/fd/62\t2025-10-06 15:06:27.797625349 +0000\n@@ -1,2 +1,2 @@\n 00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ grep -qE '^\\-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\\s+default\\b'\n+ echo OK\n+ printf '%s\\n' '--- /dev/fd/63\t2025-10-06 15:06:27.797625349 +0000\n+++ /dev/fd/62\t2025-10-06 15:06:27.797625349 +0000\n@@ -1,2 +1,2 @@\n 00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n+ grep -qE '^\\+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\\s+cell1\\b'\n+ echo OK\n++ grep -E '^[-\\+]\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,}'\n++ wc -l\n+ '[' 2 -eq 2 ']'\n+ echo OK\n++ echo cell1\n+ for CELL in $(echo $RENAMED_CELLS)\n+ RCELL=cell1\n+ '[' cell1 = cell1 ']'\n+ RCELL=default\n+ set +u\n+ . 
/home/zuul/.source_cloud_exported_variables_default\n++ unset PULL_OPENSTACK_CONFIGURATION_DATABASES\n++ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n++ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n++ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES\n++ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK\n++ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES\n++ PULL_OPENSTACK_CONFIGURATION_DATABASES[default]='aodh\ncinder\nglance\ngnocchi\nheat\ninformation_schema\nkeystone\nmanila\nmysql\nnova\nnova_api\nnova_cell0\noctavia\noctavia_persistence\novs_neutron\nperformance_schema\nplacement'\n++ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[default]=\n++ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[default]=standalone.ooo.test\n++ '[' default = default ']'\n++ PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0\n8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'\n++ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| Name | UUID | Transport URL | Database Connection | Disabled |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r\n| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |\r\n+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+\r'\n+ set -u\n++ oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot -p12345678 -e 'SHOW databases;'\n+ c1dbs='information_schema\nmysql\nnova_cell1\nperformance_schema'\n+ echo information_schema mysql nova_cell1 performance_schema\n+ grep -Eq '\\bnova_cell1\\b'\n+ echo OK\n++ oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot -p12345678 nova_cell1 -e 'select host from services where services.binary='\\''nova-compute'\\'' and deleted=0 order by host asc;'\n+ novadb_svc_records=standalone.ooo.test\n+ diff -Z /dev/fd/63 /dev/fd/62\n++ echo xstandalone.ooo.test\n++ echo xstandalone.ooo.test\n+ echo OK", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ MARIADB_IMAGE=quay.io/podified-antelope-centos9/openstack-mariadb:current-podified", "+ 
CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ CHARACTER_SET=utf8", "+ COLLATION=utf8_general_ci", "+ set -euxo pipefail", "+ CELLS=default", "+ DEFAULT_CELL_NAME=cell1", "+ RENAMED_CELLS=cell1", "+ declare -A PODIFIED_DB_ROOT_PASSWORD", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "++ oc get -o json secret/osp-secret", "++ jq -r .data.DbRootPassword", "++ base64 -d", "+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "++ oc get -o json secret/osp-secret", "++ jq -r .data.DbRootPassword", "++ base64 -d", "+ PODIFIED_DB_ROOT_PASSWORD[$CELL]=12345678", "+ declare -A PODIFIED_MARIADB_IP", "++ echo 'super cell1'", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ '[' super = super ']'", "++ oc get svc --selector mariadb/name=openstack '-ojsonpath={.items[0].spec.clusterIP}'", "+ PODIFIED_MARIADB_IP[$CELL]=10.217.4.91", "+ for CELL in $(echo \"super $RENAMED_CELLS\")", "+ '[' cell1 = super ']'", "++ oc get svc --selector mariadb/name=openstack-cell1 '-ojsonpath={.items[0].spec.clusterIP}'", "+ PODIFIED_MARIADB_IP[$CELL]=10.217.5.113", "+ set +u", "+ . /home/zuul/.source_cloud_exported_variables_default", "++ unset PULL_OPENSTACK_CONFIGURATION_DATABASES", "++ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "++ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "++ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES", "++ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "++ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "++ PULL_OPENSTACK_CONFIGURATION_DATABASES[default]='aodh", "cinder", "glance", "gnocchi", "heat", "information_schema", "keystone", "manila", "mysql", "nova", "nova_api", "nova_cell0", "octavia", "octavia_persistence", "ovs_neutron", "performance_schema", "placement'", "++ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[default]=", "++ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[default]=standalone.ooo.test", "++ '[' default = default ']'", "++ PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "++ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| Name | UUID | Transport URL | Database Connection | Disabled |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | 
rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "'", "+ set -u", "++ oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p12345678 -e 'SHOW databases;'", "+ dbs='cinder", "glance", "heat", "information_schema", "keystone", "manila", "mysql", "neutron", "nova_api", "nova_cell0", "octavia", "octavia_persistence", "performance_schema", "placement'", "+ echo cinder glance heat information_schema keystone manila mysql neutron nova_api nova_cell0 octavia octavia_persistence performance_schema placement", "+ grep -Eq '\\bkeystone\\b'", "+ echo OK", "+ echo cinder glance heat information_schema keystone manila mysql neutron nova_api nova_cell0 octavia octavia_persistence performance_schema placement", "+ grep -Eq '\\bneutron\\b'", "+ echo OK", "+ echo 'aodh", "cinder", "glance", "gnocchi", "heat", "information_schema", "keystone", "manila", "mysql", "nova", "nova_api", "nova_cell0", "octavia", "octavia_persistence", "ovs_neutron", "performance_schema", "placement'", "+ grep -Eq '\\bovs_neutron\\b'", "+ echo OK", "++ oc exec openstack-galera-0 -c galera -- mysql -rs -uroot -p12345678 nova_api -e 'select uuid,name,transport_url,database_connection,disabled from cell_mappings;'", "+ novadb_mapped_cells='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ uuidf='\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,}'", "++ printf '%s\\n' '00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "++ sed -rn 's/^(\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,})\\s+default\\b.*$/\\1/p'", "+ default=8acf629b-94c0-4fdc-bf28-41015a9a8dc4", "++ diff -ZNua /dev/fd/63 /dev/fd/62", "+++ printf '%s\\n' '00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+++ printf '%s\\n' 
'00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ difference='--- /dev/fd/63\t2025-10-06 15:06:27.797625349 +0000", "+++ /dev/fd/62\t2025-10-06 15:06:27.797625349 +0000", "@@ -1,2 +1,2 @@", " 00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ true", "+ '[' cell1 '!=' default ']'", "+ printf '%s\\n' '--- /dev/fd/63\t2025-10-06 15:06:27.797625349 +0000", "+++ /dev/fd/62\t2025-10-06 15:06:27.797625349 +0000", "@@ -1,2 +1,2 @@", " 00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ grep -qE '^\\-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\\s+default\\b'", "+ echo OK", "+ printf '%s\\n' '--- /dev/fd/63\t2025-10-06 15:06:27.797625349 +0000", "+++ /dev/fd/62\t2025-10-06 15:06:27.797625349 +0000", "@@ -1,2 +1,2 @@", " 00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "-8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tcell1\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "+ grep -qE '^\\+8acf629b-94c0-4fdc-bf28-41015a9a8dc4\\s+cell1\\b'", "+ echo OK", "++ grep -E '^[-\\+]\\S{8,}-\\S{4,}-\\S{4,}-\\S{4,}-\\S{12,}'", "++ wc -l", "+ '[' 2 -eq 2 ']'", "+ echo OK", "++ echo cell1", "+ for CELL 
in $(echo $RENAMED_CELLS)", "+ RCELL=cell1", "+ '[' cell1 = cell1 ']'", "+ RCELL=default", "+ set +u", "+ . /home/zuul/.source_cloud_exported_variables_default", "++ unset PULL_OPENSTACK_CONFIGURATION_DATABASES", "++ unset PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "++ unset PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "++ declare -xA PULL_OPENSTACK_CONFIGURATION_DATABASES", "++ declare -xA PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK", "++ declare -xA PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES", "++ PULL_OPENSTACK_CONFIGURATION_DATABASES[default]='aodh", "cinder", "glance", "gnocchi", "heat", "information_schema", "keystone", "manila", "mysql", "nova", "nova_api", "nova_cell0", "octavia", "octavia_persistence", "ovs_neutron", "performance_schema", "placement'", "++ PULL_OPENSTACK_CONFIGURATION_MYSQLCHECK_NOK[default]=", "++ PULL_OPENSTACK_CONFIGURATION_NOVA_COMPUTE_HOSTNAMES[default]=standalone.ooo.test", "++ '[' default = default ']'", "++ PULL_OPENSTACK_CONFIGURATION_NOVADB_MAPPED_CELLS='00000000-0000-0000-0000-000000000000\tcell0\tnone:///\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0", "8acf629b-94c0-4fdc-bf28-41015a9a8dc4\tdefault\trabbit://{username}:{password}@standalone.internalapi.ooo.test:5672/?ssl=1\tmysql+pymysql://{username}:{password}@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo\t0'", "++ PULL_OPENSTACK_CONFIGURATION_NOVAMANAGE_CELL_MAPPINGS='+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| Name | UUID | Transport URL | Database Connection | Disabled |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "| default | 8acf629b-94c0-4fdc-bf28-41015a9a8dc4 | rabbit://guest:****@standalone.internalapi.ooo.test:5672/?ssl=1 | mysql+pymysql://nova:****@overcloud.internalapi.ooo.test/nova?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo | False |", "+---------+--------------------------------------+-----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------+----------+", "'", "+ set -u", "++ oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot -p12345678 -e 'SHOW databases;'", "+ c1dbs='information_schema", "mysql", "nova_cell1", "performance_schema'", "+ echo information_schema mysql nova_cell1 performance_schema", "+ grep -Eq '\\bnova_cell1\\b'", "+ echo OK", "++ oc exec openstack-cell1-galera-0 -c galera -- mysql -rs -uroot -p12345678 nova_cell1 -e 'select host from services where services.binary='\\''nova-compute'\\'' and deleted=0 order by host asc;'", "+ novadb_svc_records=standalone.ooo.test", "+ diff -Z /dev/fd/63 /dev/fd/62", "++ echo xstandalone.ooo.test", "++ echo 
xstandalone.ooo.test", "+ echo OK"], "stdout": "OK\nOK\nOK\nOK\nOK\nOK\nOK\nOK", "stdout_lines": ["OK", "OK", "OK", "OK", "OK", "OK", "OK", "OK"]} TASK [ovn_adoption : deploy podified OVN ovsdb-servers] ************************ changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n ovn:\n enabled: true\n template:\n ovnDBCluster:\n ovndbcluster-nb:\n replicas: 3\n dbType: NB\n storageRequest: 10G\n networkAttachment: internalapi\n ovndbcluster-sb:\n replicas: 3\n dbType: SB\n storageRequest: 10G\n networkAttachment: internalapi\n'\n", "delta": "0:00:00.215134", "end": "2025-10-06 15:06:28.737900", "msg": "", "rc": 0, "start": "2025-10-06 15:06:28.522766", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n ovn:\n enabled: true\n template:\n ovnDBCluster:\n ovndbcluster-nb:\n replicas: 3\n dbType: NB\n storageRequest: 10G\n networkAttachment: internalapi\n ovndbcluster-sb:\n replicas: 3\n dbType: SB\n storageRequest: 10G\n networkAttachment: internalapi\n'", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " ovn:", " enabled: true", " template:", " ovnDBCluster:", " ovndbcluster-nb:", " replicas: 3", " dbType: NB", " storageRequest: 10G", " networkAttachment: internalapi", " ovndbcluster-sb:", " replicas: 3", " dbType: SB", " storageRequest: 10G", " networkAttachment: internalapi", "'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [ovn_adoption : wait for OVN ovsdb-servers to start up] ******************* FAILED - RETRYING: [localhost]: wait for OVN ovsdb-servers to start up (60 retries left). FAILED - RETRYING: [localhost]: wait for OVN ovsdb-servers to start up (59 retries left). 
changed: [localhost] => {"attempts": 3, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait pod --for condition=Ready --selector=service=ovsdbserver-nb\noc wait pod --for condition=Ready --selector=service=ovsdbserver-sb\n", "delta": "0:00:06.636506", "end": "2025-10-06 15:06:40.394662", "msg": "", "rc": 0, "start": "2025-10-06 15:06:33.758156", "stderr": "+ oc wait pod --for condition=Ready --selector=service=ovsdbserver-nb\n+ oc wait pod --for condition=Ready --selector=service=ovsdbserver-sb", "stderr_lines": ["+ oc wait pod --for condition=Ready --selector=service=ovsdbserver-nb", "+ oc wait pod --for condition=Ready --selector=service=ovsdbserver-sb"], "stdout": "pod/ovsdbserver-nb-0 condition met\npod/ovsdbserver-nb-1 condition met\npod/ovsdbserver-nb-2 condition met\npod/ovsdbserver-sb-0 condition met\npod/ovsdbserver-sb-1 condition met\npod/ovsdbserver-sb-2 condition met", "stdout_lines": ["pod/ovsdbserver-nb-0 condition met", "pod/ovsdbserver-nb-1 condition met", "pod/ovsdbserver-nb-2 condition met", "pod/ovsdbserver-sb-0 condition met", "pod/ovsdbserver-sb-1 condition met", "pod/ovsdbserver-sb-2 condition met"]} TASK [ovn_adoption : get podified OVN NB ovsdb-server service cluster IP] ****** changed: [localhost] => {"attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\noc get svc --selector \"statefulset.kubernetes.io/pod-name=ovsdbserver-nb-0\" -ojsonpath='{.items[0].spec.clusterIP}'\n", "delta": "0:00:00.159005", "end": "2025-10-06 15:06:40.804254", "msg": "", "rc": 0, "start": "2025-10-06 15:06:40.645249", "stderr": "+ oc get svc --selector statefulset.kubernetes.io/pod-name=ovsdbserver-nb-0 '-ojsonpath={.items[0].spec.clusterIP}'", "stderr_lines": ["+ oc get svc --selector statefulset.kubernetes.io/pod-name=ovsdbserver-nb-0 '-ojsonpath={.items[0].spec.clusterIP}'"], "stdout": "10.217.4.74", "stdout_lines": ["10.217.4.74"]} TASK [ovn_adoption : get podified OVN SB ovsdb-server IP] ********************** changed: [localhost] => {"attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\noc get svc --selector \"statefulset.kubernetes.io/pod-name=ovsdbserver-sb-0\" -ojsonpath='{.items[0].spec.clusterIP}'\n", "delta": "0:00:00.124206", "end": "2025-10-06 15:06:41.168680", "msg": "", "rc": 0, "start": "2025-10-06 15:06:41.044474", "stderr": "+ oc get svc --selector statefulset.kubernetes.io/pod-name=ovsdbserver-sb-0 '-ojsonpath={.items[0].spec.clusterIP}'", "stderr_lines": ["+ oc get svc --selector statefulset.kubernetes.io/pod-name=ovsdbserver-sb-0 '-ojsonpath={.items[0].spec.clusterIP}'"], "stdout": "10.217.5.43", "stdout_lines": ["10.217.5.43"]} TASK [ovn_adoption : execute alternative tasks when source env is ODPdO] ******* skipping: [localhost] => {"changed": false, "false_condition": "ospdo_src| bool", "skip_reason": "Conditional result was False"} TASK [ovn_adoption : set OVN copy shell vars] ********************************** ok: [localhost] => {"ansible_facts": {"ovn_copy_shell_vars": "STORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n"}, "changed": false} TASK [ovn_adoption : start an adoption helper pod] ***************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo 
pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\noc apply -f - < {"attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait --for condition=Ready pod/ovn-copy-data --timeout=30s\n", "delta": "0:00:05.428926", "end": "2025-10-06 15:06:47.314364", "msg": "", "rc": 0, "start": "2025-10-06 15:06:41.885438", "stderr": "+ oc wait --for condition=Ready pod/ovn-copy-data --timeout=30s", "stderr_lines": ["+ oc wait --for condition=Ready pod/ovn-copy-data --timeout=30s"], "stdout": "pod/ovn-copy-data condition met", "stdout_lines": ["pod/ovn-copy-data condition met"]} TASK [ovn_adoption : stop northd service] ************************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\n$CONTROLLER1_SSH if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi\n$CONTROLLER2_SSH if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi\n$CONTROLLER3_SSH if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi\n", "delta": "0:00:00.341832", "end": "2025-10-06 15:06:47.898960", "msg": "", "rc": 0, "start": "2025-10-06 15:06:47.557128", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ PODIFIED_OVSDB_NB_IP=10.217.4.74\n+ PODIFIED_OVSDB_SB_IP=10.217.5.43\n+ SOURCE_OVSDB_IP=192.168.122.100\n+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi\n+ : if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi\n+ : if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ PODIFIED_OVSDB_NB_IP=10.217.4.74", "+ PODIFIED_OVSDB_SB_IP=10.217.5.43", "+ SOURCE_OVSDB_IP=192.168.122.100", "+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi", "+ : if sudo systemctl is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi", "+ : if sudo systemctl 
is-active tripleo_ovn_cluster_northd.service ';' then sudo systemctl stop tripleo_ovn_cluster_northd.service ';' fi"], "stdout": "inactive", "stdout_lines": ["inactive"]} TASK [ovn_adoption : Add nftables rule to allow podified internalapi traffic to controllers (IPv4)] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\n$CONTROLLER1_SSH sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept\n$CONTROLLER1_SSH sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept\n$CONTROLLER2_SSH sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept\n$CONTROLLER2_SSH sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept\n$CONTROLLER3_SSH sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept\n$CONTROLLER3_SSH sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept\n", "delta": "0:00:00.647802", "end": "2025-10-06 15:06:48.795630", "msg": "", "rc": 0, "start": "2025-10-06 15:06:48.147828", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ PODIFIED_OVSDB_NB_IP=10.217.4.74\n+ PODIFIED_OVSDB_SB_IP=10.217.5.43\n+ SOURCE_OVSDB_IP=192.168.122.100\n+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept\n+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept\n+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept\n+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept\n+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ PODIFIED_OVSDB_NB_IP=10.217.4.74", "+ PODIFIED_OVSDB_SB_IP=10.217.5.43", "+ SOURCE_OVSDB_IP=192.168.122.100", "+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept", "+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept", "+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new 
counter accept", "+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6641 ct state new counter accept", "+ : sudo nft add rule inet filter INPUT ip saddr 172.17.1.0/24 tcp dport 6642 ct state new counter accept"], "stdout": "", "stdout_lines": []} TASK [ovn_adoption : Add nftables rule to allow podified internalapi traffic to controllers (IPv6)] *** skipping: [localhost] => {"changed": false, "false_condition": "ipv6_enabled | bool", "skip_reason": "Conditional result was False"} TASK [ovn_adoption : dump OVN databases using tcp connection] ****************** skipping: [localhost] => {"changed": false, "false_condition": "enable_tlse|bool is false", "skip_reason": "Conditional result was False"} TASK [ovn_adoption : dump OVN databases using ssl connection] ****************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\noc exec ovn-copy-data -- bash -c \"ovsdb-client backup --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:$SOURCE_OVSDB_IP:6641 > /backup/ovs-nb.db\"\noc exec ovn-copy-data -- bash -c \"ovsdb-client backup --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:$SOURCE_OVSDB_IP:6642 > /backup/ovs-sb.db\"\n", "delta": "0:00:00.601549", "end": "2025-10-06 15:06:49.724683", "msg": "", "rc": 0, "start": "2025-10-06 15:06:49.123134", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ PODIFIED_OVSDB_NB_IP=10.217.4.74\n+ PODIFIED_OVSDB_SB_IP=10.217.5.43\n+ SOURCE_OVSDB_IP=192.168.122.100\n+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ oc exec ovn-copy-data -- bash -c 'ovsdb-client backup --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:192.168.122.100:6641 > /backup/ovs-nb.db'\n+ oc exec ovn-copy-data -- bash -c 'ovsdb-client backup --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:192.168.122.100:6642 > /backup/ovs-sb.db'", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ PODIFIED_OVSDB_NB_IP=10.217.4.74", "+ PODIFIED_OVSDB_SB_IP=10.217.5.43", "+ SOURCE_OVSDB_IP=192.168.122.100", "+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ oc exec ovn-copy-data -- bash -c 'ovsdb-client backup --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:192.168.122.100:6641 > /backup/ovs-nb.db'", "+ oc exec ovn-copy-data -- bash -c 'ovsdb-client backup --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:192.168.122.100:6642 > /backup/ovs-sb.db'"], "stdout": "", "stdout_lines": []} TASK [ovn_adoption : upgrade OVN databases to the latest schema from podified ovsdb-servers] *** 
skipping: [localhost] => {"changed": false, "false_condition": "enable_tlse|bool is false", "skip_reason": "Conditional result was False"} TASK [ovn_adoption : upgrade OVN databases to the latest schema from podified ovsdb-servers (tls)] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\noc exec ovn-copy-data -- bash -c \"ovsdb-client get-schema --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:$PODIFIED_OVSDB_NB_IP:6641 > /backup/ovs-nb.ovsschema && ovsdb-tool convert /backup/ovs-nb.db /backup/ovs-nb.ovsschema\"\noc exec ovn-copy-data -- bash -c \"ovsdb-client get-schema --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:$PODIFIED_OVSDB_SB_IP:6642 > /backup/ovs-sb.ovsschema && ovsdb-tool convert /backup/ovs-sb.db /backup/ovs-sb.ovsschema\"\n", "delta": "0:00:00.556547", "end": "2025-10-06 15:06:50.568485", "msg": "", "rc": 0, "start": "2025-10-06 15:06:50.011938", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ PODIFIED_OVSDB_NB_IP=10.217.4.74\n+ PODIFIED_OVSDB_SB_IP=10.217.5.43\n+ SOURCE_OVSDB_IP=192.168.122.100\n+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ oc exec ovn-copy-data -- bash -c 'ovsdb-client get-schema --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.4.74:6641 > /backup/ovs-nb.ovsschema && ovsdb-tool convert /backup/ovs-nb.db /backup/ovs-nb.ovsschema'\n+ oc exec ovn-copy-data -- bash -c 'ovsdb-client get-schema --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.5.43:6642 > /backup/ovs-sb.ovsschema && ovsdb-tool convert /backup/ovs-sb.db /backup/ovs-sb.ovsschema'", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ PODIFIED_OVSDB_NB_IP=10.217.4.74", "+ PODIFIED_OVSDB_SB_IP=10.217.5.43", "+ SOURCE_OVSDB_IP=192.168.122.100", "+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ oc exec ovn-copy-data -- bash -c 'ovsdb-client get-schema --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.4.74:6641 > /backup/ovs-nb.ovsschema && ovsdb-tool convert /backup/ovs-nb.db /backup/ovs-nb.ovsschema'", "+ oc exec ovn-copy-data -- bash -c 'ovsdb-client get-schema --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.5.43:6642 > /backup/ovs-sb.ovsschema && ovsdb-tool convert /backup/ovs-sb.db /backup/ovs-sb.ovsschema'"], "stdout": "", "stdout_lines": []} TASK [ovn_adoption : restore OVN database backups to podified ovsdb-servers] *** skipping: [localhost] => {"changed": false, "false_condition": "enable_tlse|bool is false", "skip_reason": "Conditional result was 
False"} TASK [ovn_adoption : restore OVN database backups to podified ovsdb-servers (tls)] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\noc exec ovn-copy-data -- bash -c \"ovsdb-client restore --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:$PODIFIED_OVSDB_NB_IP:6641 < /backup/ovs-nb.db\"\noc exec ovn-copy-data -- bash -c \"ovsdb-client restore --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:$PODIFIED_OVSDB_SB_IP:6642 < /backup/ovs-sb.db\"\n", "delta": "0:00:00.493611", "end": "2025-10-06 15:06:51.366165", "msg": "", "rc": 0, "start": "2025-10-06 15:06:50.872554", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ PODIFIED_OVSDB_NB_IP=10.217.4.74\n+ PODIFIED_OVSDB_SB_IP=10.217.5.43\n+ SOURCE_OVSDB_IP=192.168.122.100\n+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ oc exec ovn-copy-data -- bash -c 'ovsdb-client restore --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.4.74:6641 < /backup/ovs-nb.db'\n+ oc exec ovn-copy-data -- bash -c 'ovsdb-client restore --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.5.43:6642 < /backup/ovs-sb.db'", "stderr_lines": ["+ STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ PODIFIED_OVSDB_NB_IP=10.217.4.74", "+ PODIFIED_OVSDB_SB_IP=10.217.5.43", "+ SOURCE_OVSDB_IP=192.168.122.100", "+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ oc exec ovn-copy-data -- bash -c 'ovsdb-client restore --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.4.74:6641 < /backup/ovs-nb.db'", "+ oc exec ovn-copy-data -- bash -c 'ovsdb-client restore --ca-cert=/etc/pki/tls/misc/ca.crt --private-key=/etc/pki/tls/misc/tls.key --certificate=/etc/pki/tls/misc/tls.crt ssl:10.217.5.43:6642 < /backup/ovs-sb.db'"], "stdout": "", "stdout_lines": []} TASK [ovn_adoption : deploy podified OVN northd service to keep databases in sync] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n ovn:\n enabled: true\n template:\n ovnNorthd:\n replicas: 1\n'\n", "delta": "0:00:00.192068", "end": "2025-10-06 15:06:51.802817", "msg": "", "rc": 0, "start": "2025-10-06 15:06:51.610749", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n ovn:\n enabled: true\n template:\n ovnNorthd:\n replicas: 1\n'", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " ovn:", " enabled: true", " template:", " ovnNorthd:", " replicas: 1", "'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack 
patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [ovn_adoption : Patch OVN add baremetal bridge mapping] ******************* skipping: [localhost] => {"changed": false, "false_condition": "ironic_adoption|bool", "skip_reason": "Conditional result was False"} TASK [ovn_adoption : Enable ovn controller] ************************************ changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=json -p=\"[{'op': 'remove', 'path': '/spec/ovn/template/ovnController/nodeSelector'}]\"\n", "delta": "0:00:00.295062", "end": "2025-10-06 15:06:52.370209", "msg": "", "rc": 0, "start": "2025-10-06 15:06:52.075147", "stderr": "+ oc patch openstackcontrolplane openstack --type=json '-p=[{'\\''op'\\'': '\\''remove'\\'', '\\''path'\\'': '\\''/spec/ovn/template/ovnController/nodeSelector'\\''}]'", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=json '-p=[{'\\''op'\\'': '\\''remove'\\'', '\\''path'\\'': '\\''/spec/ovn/template/ovnController/nodeSelector'\\''}]'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [ovn_adoption : list briefs from OVN NB and SB databases] ***************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n\noc exec ovsdbserver-nb-0 -- ovn-nbctl show\noc exec ovsdbserver-sb-0 -- ovn-sbctl show\n", "delta": "0:00:00.482186", "end": "2025-10-06 15:06:53.102765", "msg": "", "rc": 0, "start": "2025-10-06 15:06:52.620579", "stderr": "+ oc exec ovsdbserver-nb-0 -- ovn-nbctl show\nDefaulted container \"ovsdbserver-nb\" out of: ovsdbserver-nb, openstack-network-exporter\n+ oc exec ovsdbserver-sb-0 -- ovn-sbctl show\nDefaulted container \"ovsdbserver-sb\" out of: ovsdbserver-sb, openstack-network-exporter", "stderr_lines": ["+ oc exec ovsdbserver-nb-0 -- ovn-nbctl show", "Defaulted container \"ovsdbserver-nb\" out of: ovsdbserver-nb, openstack-network-exporter", "+ oc exec ovsdbserver-sb-0 -- ovn-sbctl show", "Defaulted container \"ovsdbserver-sb\" out of: ovsdbserver-sb, openstack-network-exporter"], "stdout": "switch 92e628f0-7365-4037-a5a2-392fbc34b6f6 (neutron-c5b872be-d2ba-442e-a269-4d5f635276f6) (aka lb-mgmt-net)\n port e37fbe75-3d07-400b-ba4d-c3052d51c2ce (aka octavia-health-manager-standalone.ooo.test-listen-port)\n addresses: [\"fa:16:3e:13:33:1d 172.24.0.168\"]\n port 6b09b74f-430a-4cdd-a63d-f5db8bde2c91\n type: localport\n addresses: [\"fa:16:3e:1c:de:2e 172.24.0.2\"]\n port 1bc7dfbe-f0b5-4833-84b7-714bee782adc\n addresses: [\"unknown\"]\nswitch 7b5da951-e7f5-44ac-9450-124a106de6ef (neutron-21266228-6569-4e70-90b3-d960c402bd06) (aka public)\n port 3fe36b85-a59e-416e-910e-22713d023d9e\n type: localport\n addresses: [\"fa:16:3e:4c:9d:6a 192.168.122.171\"]\n port c9b75a92-b5d4-4535-9460-5d559d736620\n addresses: [\"unknown\"]\n port 83402839-b584-4817-a270-688590520390\n type: router\n router-port: lrp-83402839-b584-4817-a270-688590520390\n port provnet-887cbdf9-fe1e-4138-bdda-13b72a3a4510\n type: localnet\n addresses: [\"unknown\"]\nswitch 25a7115b-acbd-4019-9de4-99fa0c24b4b1 (neutron-764282d1-88ad-48a1-b206-80bbea72a34f) (aka private)\n port d90e9833-0647-44dd-b936-b326ab96e3f7\n addresses: [\"unknown\"]\n port 9923f97b-5a4a-41ee-81d7-cfbe4adcdbd1\n type: localport\n addresses: [\"fa:16:3e:a6:19:1a 192.168.0.2\"]\n port 693c3c0b-5632-43c1-be62-cca22bf53ff5\n type: router\n router-port: 
lrp-693c3c0b-5632-43c1-be62-cca22bf53ff5\n port e117576e-f9a5-4182-9ec7-ad6f8b3a2186\n addresses: [\"fa:16:3e:84:13:1e 192.168.0.148\"]\nrouter 4d4f7757-e1a4-4a49-9db4-2419cfba82ad (neutron-001dcbf7-44f3-46e1-b674-9541f3df5199) (aka priv_router)\n port lrp-693c3c0b-5632-43c1-be62-cca22bf53ff5\n mac: \"fa:16:3e:3f:12:dd\"\n networks: [\"192.168.0.1/24\"]\n port lrp-83402839-b584-4817-a270-688590520390\n mac: \"fa:16:3e:0c:de:f8\"\n networks: [\"192.168.122.248/24\"]\n gateway chassis: [579788be-c2c1-4f74-a8d6-40dafff82015]\n nat 4c08bf86-ab9f-499c-bd31-e380d26527f5\n external ip: \"192.168.122.248\"\n logical ip: \"192.168.0.0/24\"\n type: \"snat\"\n nat e9e8dc10-5331-40d4-a98b-280a711a449b\n external ip: \"192.168.122.20\"\n logical ip: \"192.168.0.148\"\n type: \"dnat_and_snat\"\nChassis \"579788be-c2c1-4f74-a8d6-40dafff82015\"\n hostname: standalone.ooo.test\n Encap geneve\n ip: \"172.19.0.100\"\n options: {csum=\"true\"}\n Port_Binding cr-lrp-83402839-b584-4817-a270-688590520390\n Port_Binding \"d90e9833-0647-44dd-b936-b326ab96e3f7\"\n Port_Binding \"1bc7dfbe-f0b5-4833-84b7-714bee782adc\"\n Port_Binding \"e37fbe75-3d07-400b-ba4d-c3052d51c2ce\"\n Port_Binding \"e117576e-f9a5-4182-9ec7-ad6f8b3a2186\"\n Port_Binding \"c9b75a92-b5d4-4535-9460-5d559d736620\"", "stdout_lines": ["switch 92e628f0-7365-4037-a5a2-392fbc34b6f6 (neutron-c5b872be-d2ba-442e-a269-4d5f635276f6) (aka lb-mgmt-net)", " port e37fbe75-3d07-400b-ba4d-c3052d51c2ce (aka octavia-health-manager-standalone.ooo.test-listen-port)", " addresses: [\"fa:16:3e:13:33:1d 172.24.0.168\"]", " port 6b09b74f-430a-4cdd-a63d-f5db8bde2c91", " type: localport", " addresses: [\"fa:16:3e:1c:de:2e 172.24.0.2\"]", " port 1bc7dfbe-f0b5-4833-84b7-714bee782adc", " addresses: [\"unknown\"]", "switch 7b5da951-e7f5-44ac-9450-124a106de6ef (neutron-21266228-6569-4e70-90b3-d960c402bd06) (aka public)", " port 3fe36b85-a59e-416e-910e-22713d023d9e", " type: localport", " addresses: [\"fa:16:3e:4c:9d:6a 192.168.122.171\"]", " port c9b75a92-b5d4-4535-9460-5d559d736620", " addresses: [\"unknown\"]", " port 83402839-b584-4817-a270-688590520390", " type: router", " router-port: lrp-83402839-b584-4817-a270-688590520390", " port provnet-887cbdf9-fe1e-4138-bdda-13b72a3a4510", " type: localnet", " addresses: [\"unknown\"]", "switch 25a7115b-acbd-4019-9de4-99fa0c24b4b1 (neutron-764282d1-88ad-48a1-b206-80bbea72a34f) (aka private)", " port d90e9833-0647-44dd-b936-b326ab96e3f7", " addresses: [\"unknown\"]", " port 9923f97b-5a4a-41ee-81d7-cfbe4adcdbd1", " type: localport", " addresses: [\"fa:16:3e:a6:19:1a 192.168.0.2\"]", " port 693c3c0b-5632-43c1-be62-cca22bf53ff5", " type: router", " router-port: lrp-693c3c0b-5632-43c1-be62-cca22bf53ff5", " port e117576e-f9a5-4182-9ec7-ad6f8b3a2186", " addresses: [\"fa:16:3e:84:13:1e 192.168.0.148\"]", "router 4d4f7757-e1a4-4a49-9db4-2419cfba82ad (neutron-001dcbf7-44f3-46e1-b674-9541f3df5199) (aka priv_router)", " port lrp-693c3c0b-5632-43c1-be62-cca22bf53ff5", " mac: \"fa:16:3e:3f:12:dd\"", " networks: [\"192.168.0.1/24\"]", " port lrp-83402839-b584-4817-a270-688590520390", " mac: \"fa:16:3e:0c:de:f8\"", " networks: [\"192.168.122.248/24\"]", " gateway chassis: [579788be-c2c1-4f74-a8d6-40dafff82015]", " nat 4c08bf86-ab9f-499c-bd31-e380d26527f5", " external ip: \"192.168.122.248\"", " logical ip: \"192.168.0.0/24\"", " type: \"snat\"", " nat e9e8dc10-5331-40d4-a98b-280a711a449b", " external ip: \"192.168.122.20\"", " logical ip: \"192.168.0.148\"", " type: \"dnat_and_snat\"", "Chassis \"579788be-c2c1-4f74-a8d6-40dafff82015\"", " hostname: 
standalone.ooo.test", " Encap geneve", " ip: \"172.19.0.100\"", " options: {csum=\"true\"}", " Port_Binding cr-lrp-83402839-b584-4817-a270-688590520390", " Port_Binding \"d90e9833-0647-44dd-b936-b326ab96e3f7\"", " Port_Binding \"1bc7dfbe-f0b5-4833-84b7-714bee782adc\"", " Port_Binding \"e37fbe75-3d07-400b-ba4d-c3052d51c2ce\"", " Port_Binding \"e117576e-f9a5-4182-9ec7-ad6f8b3a2186\"", " Port_Binding \"c9b75a92-b5d4-4535-9460-5d559d736620\""]} TASK [ovn_adoption : stop old ovn ovsdb services] ****************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nSTORAGE_CLASS=crc-csi-hostpath-provisioner\nPODIFIED_OVSDB_NB_IP=10.217.4.74\nPODIFIED_OVSDB_SB_IP=10.217.5.43\nSOURCE_OVSDB_IP=192.168.122.100\nOVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\nCONTROLLER2_SSH=\":\"\nCONTROLLER3_SSH=\":\"\n\n\n$CONTROLLER1_SSH if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi\n$CONTROLLER2_SSH if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi\n$CONTROLLER3_SSH if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi\n\n$CONTROLLER1_SSH if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi\n$CONTROLLER2_SSH if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi\n$CONTROLLER3_SSH if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi\n", "delta": "0:00:01.649150", "end": "2025-10-06 15:06:54.999314", "msg": "", "rc": 0, "start": "2025-10-06 15:06:53.350164", "stderr": "+ STORAGE_CLASS=crc-csi-hostpath-provisioner\n+ PODIFIED_OVSDB_NB_IP=10.217.4.74\n+ PODIFIED_OVSDB_SB_IP=10.217.5.43\n+ SOURCE_OVSDB_IP=192.168.122.100\n+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\n+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n+ CONTROLLER2_SSH=:\n+ CONTROLLER3_SSH=:\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi\n+ : if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi\n+ : if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi\n+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi\n+ : if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi\n+ : if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi", "stderr_lines": ["+ 
STORAGE_CLASS=crc-csi-hostpath-provisioner", "+ PODIFIED_OVSDB_NB_IP=10.217.4.74", "+ PODIFIED_OVSDB_SB_IP=10.217.5.43", "+ SOURCE_OVSDB_IP=192.168.122.100", "+ OVSDB_IMAGE=quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified", "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "+ CONTROLLER2_SSH=:", "+ CONTROLLER3_SSH=:", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi", "+ : if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi", "+ : if sudo systemctl is-active tripleo_ovn_cluster_north_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_north_db_server.service ';' fi", "+ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi", "+ : if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi", "+ : if sudo systemctl is-active tripleo_ovn_cluster_south_db_server.service ';' then sudo systemctl stop tripleo_ovn_cluster_south_db_server.service ';' fi"], "stdout": "active\nactive", "stdout_lines": ["active", "active"]} TASK [keystone_adoption : add keystone fernet keys secret] ********************* changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\noc apply -f - < {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n keystone:\n enabled: true\n apiOverride:\n route: {}\n template:\n customServiceConfig: |\n [token]\n expiration = 360000\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n databaseInstance: openstack\n secret: osp-secret\n'\n", "delta": "0:00:00.197895", "end": "2025-10-06 15:06:56.979743", "msg": "", "rc": 0, "start": "2025-10-06 15:06:56.781848", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n keystone:\n enabled: true\n apiOverride:\n route: {}\n template:\n customServiceConfig: |\n [token]\n expiration = 360000\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n databaseInstance: openstack\n secret: osp-secret\n'", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " keystone:", " enabled: true", " apiOverride:", " route: {}", " template:", " customServiceConfig: |", " [token]", " expiration = 360000", " override:", " service:", " internal:", " metadata:", " annotations:", " metallb.universe.tf/address-pool: internalapi", " metallb.universe.tf/allow-shared-ip: internalapi", " metallb.universe.tf/loadBalancerIPs: 172.17.0.80", " spec:", " type: LoadBalancer", " databaseInstance: openstack", " secret: osp-secret", "'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": 
["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [keystone_adoption : wait for Keystone to start up] *********************** FAILED - RETRYING: [localhost]: wait for Keystone to start up (10 retries left). FAILED - RETRYING: [localhost]: wait for Keystone to start up (9 retries left). changed: [localhost] => {"attempts": 3, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait pod --for condition=Ready --selector=service=keystone\n", "delta": "0:00:04.962869", "end": "2025-10-06 15:08:03.026091", "msg": "", "rc": 0, "start": "2025-10-06 15:07:58.063222", "stderr": "+ oc wait pod --for condition=Ready --selector=service=keystone", "stderr_lines": ["+ oc wait pod --for condition=Ready --selector=service=keystone"], "stdout": "pod/keystone-5c5bc6dbbd-g2zwx condition met", "stdout_lines": ["pod/keystone-5c5bc6dbbd-g2zwx condition met"]} TASK [keystone_adoption : wait for openstackclient pod to start up] ************ FAILED - RETRYING: [localhost]: wait for openstackclient pod to start up (10 retries left). changed: [localhost] => {"attempts": 2, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait pod --for condition=Ready --selector=service=openstackclient\n", "delta": "0:00:00.241214", "end": "2025-10-06 15:08:33.953212", "msg": "", "rc": 0, "start": "2025-10-06 15:08:33.711998", "stderr": "+ oc wait pod --for condition=Ready --selector=service=openstackclient", "stderr_lines": ["+ oc wait pod --for condition=Ready --selector=service=openstackclient"], "stdout": "pod/openstackclient condition met", "stdout_lines": ["pod/openstackclient condition met"]} TASK [keystone_adoption : check that Keystone is reachable and its endpoints are defined] *** changed: [localhost] => {"attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\n\nalias openstack=\"oc exec -t openstackclient -- openstack\"\n\n${BASH_ALIASES[openstack]} endpoint list | grep keystone\n", "delta": "0:00:02.575322", "end": "2025-10-06 15:08:36.779643", "msg": "", "rc": 0, "start": "2025-10-06 15:08:34.204321", "stderr": "+ alias 'openstack=oc exec -t openstackclient -- openstack'\n+ oc exec -t openstackclient -- openstack endpoint list\n+ grep keystone", "stderr_lines": ["+ alias 'openstack=oc exec -t openstackclient -- openstack'", "+ oc exec -t openstackclient -- openstack endpoint list", "+ grep keystone"], "stdout": "| 184a0b5862214c92a1e181fe851723d1 | regionOne | keystone | identity | True | internal | https://keystone-internal.openstack.svc:5000 |\n| da67297a302c4477a748d31cb98ea717 | regionOne | keystone | identity | True | admin | https://overcloud.ctlplane.ooo.test:35357 |\n| eb7dd867c4c54511850cb2a9c87137be | regionOne | keystone | identity | True | public | https://keystone-public-openstack.apps-crc.testing |", "stdout_lines": ["| 184a0b5862214c92a1e181fe851723d1 | regionOne | keystone | identity | True | internal | https://keystone-internal.openstack.svc:5000 |", "| da67297a302c4477a748d31cb98ea717 | regionOne | keystone | identity | True | admin | https://overcloud.ctlplane.ooo.test:35357 |", "| eb7dd867c4c54511850cb2a9c87137be | regionOne | keystone | identity | True | public | https://keystone-public-openstack.apps-crc.testing |"]} TASK [keystone_adoption : verify that OpenStackControlPlane setup is complete] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc wait --for=condition=Ready --timeout=3m OpenStackControlPlane openstack\n", "delta": "0:00:00.232523", "end": "2025-10-06 15:08:37.269668", "msg": "", "rc": 0, "start": "2025-10-06 15:08:37.037145", 
"stderr": "+ oc wait --for=condition=Ready --timeout=3m OpenStackControlPlane openstack", "stderr_lines": ["+ oc wait --for=condition=Ready --timeout=3m OpenStackControlPlane openstack"], "stdout": "openstackcontrolplane.core.openstack.org/openstack condition met", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack condition met"]} TASK [keystone_adoption : clean up services and endpoints] ********************* changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n\nalias openstack=\"oc exec -t openstackclient -- openstack\"\n\n${BASH_ALIASES[openstack]} endpoint list | grep keystone | awk '/admin/{ print $2; }' | xargs ${BASH_ALIASES[openstack]} endpoint delete || true\n\nfor service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic; do\n ${BASH_ALIASES[openstack]} service list | awk \"/ $service /{ print \\$2; }\" | xargs -r ${BASH_ALIASES[openstack]} service delete || true\ndone\n", "delta": "0:00:57.840155", "end": "2025-10-06 15:09:35.369156", "msg": "", "rc": 0, "start": "2025-10-06 15:08:37.529001", "stderr": "+ alias 'openstack=oc exec -t openstackclient -- openstack'\n+ oc exec -t openstackclient -- openstack endpoint list\n+ grep keystone\n+ awk '/admin/{ print $2; }'\n+ xargs oc exec -t openstackclient -- openstack endpoint delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ aodh /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ heat /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ awk '/ heat-cfn /{ print $2; }'\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ barbican /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ cinderv3 /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ glance /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ gnocchi /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement 
swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ manila /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ manilav2 /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ neutron /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ nova /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ placement /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ swift /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ ironic-inspector /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete\n+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic\n+ oc exec -t openstackclient -- openstack service list\n+ awk '/ ironic /{ print $2; }'\n+ xargs -r oc exec -t openstackclient -- openstack service delete", "stderr_lines": ["+ alias 'openstack=oc exec -t openstackclient -- openstack'", "+ oc exec -t openstackclient -- openstack endpoint list", "+ grep keystone", "+ awk '/admin/{ print $2; }'", "+ xargs oc exec -t openstackclient -- openstack endpoint delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ aodh /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ heat /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ awk '/ heat-cfn /{ print $2; }'", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift 
ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ barbican /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ cinderv3 /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ glance /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ gnocchi /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ manila /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ manilav2 /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ neutron /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ nova /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ placement /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ swift /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ ironic-inspector /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete", "+ for service in aodh heat heat-cfn barbican cinderv3 glance gnocchi manila manilav2 neutron nova placement swift ironic-inspector ironic", "+ oc exec -t openstackclient -- openstack service list", "+ awk '/ ironic /{ print $2; }'", "+ xargs -r oc exec -t openstackclient -- openstack service delete"], "stdout": "", 
"stdout_lines": []} TASK [keystone_adoption : Print session test token] **************************** ok: [localhost] => { "before_adoption_token": { "changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\n", "delta": "0:00:02.388136", "end": "2025-10-06 14:59:54.069142", "failed": false, "msg": "", "rc": 0, "start": "2025-10-06 14:59:51.681006", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "stderr_lines": [ "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory." ], "stdout": "gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg", "stdout_lines": [ "gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg" ] } } TASK [keystone_adoption : Verify that pre-adoption token still works] ********** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n\nalias openstack=\"oc exec -t openstackclient -- env -u OS_CLOUD - OS_AUTH_URL=http://keystone-public-openstack.apps-crc.testing OS_AUTH_TYPE=token OS_TOKEN=gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg openstack\"\n\nif ${BASH_ALIASES[openstack]} endpoint list 2>&1 | grep \"Failed to validate token\"; then\n exit 1\nelse\n exit 0\nfi\n", "delta": "0:00:01.611559", "end": "2025-10-06 15:09:37.233967", "msg": "", "rc": 0, "start": "2025-10-06 15:09:35.622408", "stderr": "+ alias 'openstack=oc exec -t openstackclient -- env -u OS_CLOUD - OS_AUTH_URL=http://keystone-public-openstack.apps-crc.testing OS_AUTH_TYPE=token OS_TOKEN=gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg openstack'\n+ oc exec -t openstackclient -- env -u OS_CLOUD - OS_AUTH_URL=http://keystone-public-openstack.apps-crc.testing OS_AUTH_TYPE=token OS_TOKEN=gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg openstack endpoint list\n+ grep 'Failed to validate token'\n+ exit 0", "stderr_lines": ["+ alias 'openstack=oc exec -t openstackclient -- env -u OS_CLOUD - OS_AUTH_URL=http://keystone-public-openstack.apps-crc.testing OS_AUTH_TYPE=token OS_TOKEN=gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg openstack'", "+ oc exec -t openstackclient -- env -u OS_CLOUD - OS_AUTH_URL=http://keystone-public-openstack.apps-crc.testing 
OS_AUTH_TYPE=token OS_TOKEN=gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg openstack endpoint list", "+ grep 'Failed to validate token'", "+ exit 0"], "stdout": "Failed to validate token (HTTP 404) (Request-ID: req-eeec9efe-8e31-4d5e-94ee-e5de63317f58)", "stdout_lines": ["Failed to validate token (HTTP 404) (Request-ID: req-eeec9efe-8e31-4d5e-94ee-e5de63317f58)"]} TASK [keystone_adoption : Print credentials test token] ************************ ok: [localhost] => { "before_adoption_token": { "changed": true, "cmd": "set -euxo pipefail\n\nssh -i ~/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\n", "delta": "0:00:02.388136", "end": "2025-10-06 14:59:54.069142", "failed": false, "msg": "", "rc": 0, "start": "2025-10-06 14:59:51.681006", "stderr": "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id\nWarning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory.", "stderr_lines": [ "+ ssh -i /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa -o StrictHostKeyChecking=no root@192.168.122.100 OS_CLOUD=standalone openstack token issue -f value -c id", "Warning: Identity file /home/zuul/install_yamls/out/edpm/ansibleee-ssh-key-id_rsa not accessible: No such file or directory." ], "stdout": "gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg", "stdout_lines": [ "gAAAAABo49lpXL7p5-rYghq0tnypypTDTy9xs3Fqfwi31ArdRgkPx3RMvb6kkrBsS23rzXC8Ut8Cy4aqiZ0hG2ayG8eFHn5U0gqTxIj6EQJlk_PKOKoezRR7xMBFvLUnT7dVHNGuVXlRG-iQsFbfoqS_HNkEa0jubltKqyRBJKjQMh9k4EiJgxg" ] } } TASK [keystone_adoption : Verify that pre-adoption credential stills the same] *** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\n\nalias openstack=\"oc exec -t openstackclient -- openstack\"\n\n${BASH_ALIASES[openstack]} credential show 58e2321473e34aa88fe9cd869f4cde62 -f value -c blob\n", "delta": "0:00:01.888289", "end": "2025-10-06 15:09:39.394602", "failed_when_result": false, "msg": "", "rc": 0, "start": "2025-10-06 15:09:37.506313", "stderr": "+ alias 'openstack=oc exec -t openstackclient -- openstack'\n+ oc exec -t openstackclient -- openstack credential show 58e2321473e34aa88fe9cd869f4cde62 -f value -c blob", "stderr_lines": ["+ alias 'openstack=oc exec -t openstackclient -- openstack'", "+ oc exec -t openstackclient -- openstack credential show 58e2321473e34aa88fe9cd869f4cde62 -f value -c blob"], "stdout": "test", "stdout_lines": ["test"]} TASK [barbican_adoption : patch osp-secret with kek] *************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\noc set data secret/osp-secret \"BarbicanSimpleCryptoKEK=$($CONTROLLER1_SSH \"sudo python3 -c \\\"import configparser; c = configparser.ConfigParser(); c.read('/var/lib/config-data/puppet-generated/barbican/etc/barbican/barbican.conf'); print(c['simple_crypto_plugin']['kek'])\\\"\")\"\n", "delta": "0:00:00.504235", "end": "2025-10-06 15:09:40.149604", "msg": "", "rc": 0, "start": "2025-10-06 
15:09:39.645369", "stderr": "+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'\n++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 'sudo python3 -c \"import configparser; c = configparser.ConfigParser(); c.read('\\''/var/lib/config-data/puppet-generated/barbican/etc/barbican/barbican.conf'\\''); print(c['\\''simple_crypto_plugin'\\'']['\\''kek'\\''])\"'\nTraceback (most recent call last):\n File \"\", line 1, in \n File \"/usr/lib64/python3.9/configparser.py\", line 963, in __getitem__\n raise KeyError(key)\nKeyError: 'simple_crypto_plugin'\n+ oc set data secret/osp-secret BarbicanSimpleCryptoKEK=", "stderr_lines": ["+ CONTROLLER1_SSH='ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100'", "++ ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100 'sudo python3 -c \"import configparser; c = configparser.ConfigParser(); c.read('\\''/var/lib/config-data/puppet-generated/barbican/etc/barbican/barbican.conf'\\''); print(c['\\''simple_crypto_plugin'\\'']['\\''kek'\\''])\"'", "Traceback (most recent call last):", " File \"\", line 1, in ", " File \"/usr/lib64/python3.9/configparser.py\", line 963, in __getitem__", " raise KeyError(key)", "KeyError: 'simple_crypto_plugin'", "+ oc set data secret/osp-secret BarbicanSimpleCryptoKEK="], "stdout": "secret/osp-secret data updated", "stdout_lines": ["secret/osp-secret data updated"]} TASK [barbican_adoption : deploy podified Barbican] **************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n barbican:\n enabled: true\n apiOverride:\n route: {}\n template:\n databaseInstance: openstack\n databaseAccount: barbican\n rabbitMqClusterName: rabbitmq\n secret: osp-secret\n simpleCryptoBackendSecret: osp-secret\n serviceAccount: barbican\n serviceUser: barbican\n passwordSelectors:\n database: BarbicanDatabasePassword\n service: BarbicanPassword\n simplecryptokek: BarbicanSimpleCryptoKEK\n barbicanAPI:\n replicas: 1\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n barbicanWorker:\n replicas: 1\n barbicanKeystoneListener:\n replicas: 1\n'\n", "delta": "0:00:00.222344", "end": "2025-10-06 15:09:40.633105", "msg": "", "rc": 0, "start": "2025-10-06 15:09:40.410761", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n barbican:\n enabled: true\n apiOverride:\n route: {}\n template:\n databaseInstance: openstack\n databaseAccount: barbican\n rabbitMqClusterName: rabbitmq\n secret: osp-secret\n simpleCryptoBackendSecret: osp-secret\n serviceAccount: barbican\n serviceUser: barbican\n passwordSelectors:\n database: BarbicanDatabasePassword\n service: BarbicanPassword\n simplecryptokek: BarbicanSimpleCryptoKEK\n barbicanAPI:\n replicas: 1\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n barbicanWorker:\n replicas: 1\n barbicanKeystoneListener:\n replicas: 1\n'\nWarning: unknown field \"spec.barbican.template.passwordSelectors.database\"", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " barbican:", " enabled: true", " apiOverride:", " route: {}", " template:", " databaseInstance: 
openstack", " databaseAccount: barbican", " rabbitMqClusterName: rabbitmq", " secret: osp-secret", " simpleCryptoBackendSecret: osp-secret", " serviceAccount: barbican", " serviceUser: barbican", " passwordSelectors:", " database: BarbicanDatabasePassword", " service: BarbicanPassword", " simplecryptokek: BarbicanSimpleCryptoKEK", " barbicanAPI:", " replicas: 1", " override:", " service:", " internal:", " metadata:", " annotations:", " metallb.universe.tf/address-pool: internalapi", " metallb.universe.tf/allow-shared-ip: internalapi", " metallb.universe.tf/loadBalancerIPs: 172.17.0.80", " spec:", " type: LoadBalancer", " barbicanWorker:", " replicas: 1", " barbicanKeystoneListener:", " replicas: 1", "'", "Warning: unknown field \"spec.barbican.template.passwordSelectors.database\""], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [barbican_adoption : wait for Barbican to start up] *********************** FAILED - RETRYING: [localhost]: wait for Barbican to start up (180 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (179 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (178 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (177 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (176 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (175 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (174 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (173 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (172 retries left). FAILED - RETRYING: [localhost]: wait for Barbican to start up (171 retries left). changed: [localhost] => {"attempts": 11, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait pod --for condition=Ready --selector=service=barbican\n", "delta": "0:00:10.508898", "end": "2025-10-06 15:10:14.987807", "msg": "", "rc": 0, "start": "2025-10-06 15:10:04.478909", "stderr": "+ oc wait pod --for condition=Ready --selector=service=barbican", "stderr_lines": ["+ oc wait pod --for condition=Ready --selector=service=barbican"], "stdout": "pod/barbican-api-5fbd45848d-wkv66 condition met\npod/barbican-keystone-listener-76b567cf98-2ljlg condition met\npod/barbican-worker-8558c89d89-26z6j condition met", "stdout_lines": ["pod/barbican-api-5fbd45848d-wkv66 condition met", "pod/barbican-keystone-listener-76b567cf98-2ljlg condition met", "pod/barbican-worker-8558c89d89-26z6j condition met"]} TASK [barbican_adoption : check that Barbican is reachable and its endpoints are defined] *** changed: [localhost] => {"attempts": 1, "changed": true, "cmd": "set -euxo pipefail\n\n\n\nalias openstack=\"oc exec -t openstackclient -- openstack\"\n\n${BASH_ALIASES[openstack]} endpoint list | grep key-manager\n${BASH_ALIASES[openstack]} secret list\n", "delta": "0:00:22.111277", "end": "2025-10-06 15:10:37.345800", "msg": "", "rc": 0, "start": "2025-10-06 15:10:15.234523", "stderr": "+ alias 'openstack=oc exec -t openstackclient -- openstack'\n+ oc exec -t openstackclient -- openstack endpoint list\n+ grep key-manager\n+ oc exec -t openstackclient -- openstack secret list\nFailed to contact the endpoint at https://barbican-public-openstack.apps-crc.testing for discovery. 
Fallback to using that endpoint as the base url.", "stderr_lines": ["+ alias 'openstack=oc exec -t openstackclient -- openstack'", "+ oc exec -t openstackclient -- openstack endpoint list", "+ grep key-manager", "+ oc exec -t openstackclient -- openstack secret list", "Failed to contact the endpoint at https://barbican-public-openstack.apps-crc.testing for discovery. Fallback to using that endpoint as the base url."], "stdout": "| 9601dbdb8dd642cf9a2804283ab6dc7e | regionOne | barbican | key-manager | True | public | https://barbican-public-openstack.apps-crc.testing |\n| a0e56fe9cbc34ee4bea0ed54f7667302 | regionOne | barbican | key-manager | True | internal | https://barbican-internal.openstack.svc:9311 |", "stdout_lines": ["| 9601dbdb8dd642cf9a2804283ab6dc7e | regionOne | barbican | key-manager | True | public | https://barbican-public-openstack.apps-crc.testing |", "| a0e56fe9cbc34ee4bea0ed54f7667302 | regionOne | barbican | key-manager | True | internal | https://barbican-internal.openstack.svc:9311 |"]} TASK [barbican_adoption : check that Barbican secret payload was migrated successfully] *** skipping: [localhost] => {"changed": false, "false_condition": "prelaunch_barbican_secret|default(false)", "skip_reason": "Conditional result was False"} TASK [neutron_adoption : deploy podified Neutron] ****************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n neutron:\n enabled: true\n apiOverride:\n route: {}\n template:\n customServiceConfig: |\n [DEFAULT]\n dhcp_agent_notification = True\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n databaseInstance: openstack\n databaseAccount: neutron\n secret: osp-secret\n networkAttachments:\n - internalapi\n'\n", "delta": "0:00:00.205606", "end": "2025-10-06 15:10:37.839581", "msg": "", "rc": 0, "start": "2025-10-06 15:10:37.633975", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n neutron:\n enabled: true\n apiOverride:\n route: {}\n template:\n customServiceConfig: |\n [DEFAULT]\n dhcp_agent_notification = True\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n databaseInstance: openstack\n databaseAccount: neutron\n secret: osp-secret\n networkAttachments:\n - internalapi\n'", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " neutron:", " enabled: true", " apiOverride:", " route: {}", " template:", " customServiceConfig: |", " [DEFAULT]", " dhcp_agent_notification = True", " override:", " service:", " internal:", " metadata:", " annotations:", " metallb.universe.tf/address-pool: internalapi", " metallb.universe.tf/allow-shared-ip: internalapi", " metallb.universe.tf/loadBalancerIPs: 172.17.0.80", " spec:", " type: LoadBalancer", " databaseInstance: openstack", " databaseAccount: neutron", " secret: osp-secret", " networkAttachments:", " - internalapi", "'"], "stdout": "openstackcontrolplane.core.openstack.org/openstack patched", "stdout_lines": ["openstackcontrolplane.core.openstack.org/openstack patched"]} TASK [neutron_adoption : wait for Neutron to start up] 
************************* FAILED - RETRYING: [localhost]: wait for Neutron to start up (60 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (59 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (58 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (57 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (56 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (55 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (54 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (53 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (52 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (51 retries left). FAILED - RETRYING: [localhost]: wait for Neutron to start up (50 retries left). changed: [localhost] => {"attempts": 12, "changed": true, "cmd": "set -euxo pipefail\n\n\noc wait pod --for condition=Ready --selector=service=neutron\n", "delta": "0:00:28.611483", "end": "2025-10-06 15:11:32.523201", "msg": "", "rc": 0, "start": "2025-10-06 15:11:03.911718", "stderr": "+ oc wait pod --for condition=Ready --selector=service=neutron", "stderr_lines": ["+ oc wait pod --for condition=Ready --selector=service=neutron"], "stdout": "pod/neutron-7dc5fcd74d-zllrm condition met", "stdout_lines": ["pod/neutron-7dc5fcd74d-zllrm condition met"]} TASK [neutron_adoption : check that Neutron is reachable and its endpoints are defined] *** FAILED - RETRYING: [localhost]: check that Neutron is reachable and its endpoints are defined (15 retries left). changed: [localhost] => {"attempts": 2, "changed": true, "cmd": "set -euxo pipefail\n\n\nalias openstack=\"oc exec -t openstackclient -- openstack\"\n\n${BASH_ALIASES[openstack]} endpoint list | grep network\n${BASH_ALIASES[openstack]} network list\n", "delta": "0:00:05.542496", "end": "2025-10-06 15:11:52.722695", "msg": "", "rc": 0, "start": "2025-10-06 15:11:47.180199", "stderr": "+ alias 'openstack=oc exec -t openstackclient -- openstack'\n+ oc exec -t openstackclient -- openstack endpoint list\n+ grep network\n+ oc exec -t openstackclient -- openstack network list", "stderr_lines": ["+ alias 'openstack=oc exec -t openstackclient -- openstack'", "+ oc exec -t openstackclient -- openstack endpoint list", "+ grep network", "+ oc exec -t openstackclient -- openstack network list"], "stdout": "| 49c290e88b114b27a4cd92eb86ab6b9c | regionOne | neutron | network | True | public | https://neutron-public-openstack.apps-crc.testing |\n| d63a9570175c438bb7045d6f343cffc7 | regionOne | neutron | network | True | internal | https://neutron-internal.openstack.svc:9696 |\n+--------------------------------------+-------------+--------------------------------------+\n| ID | Name | Subnets |\n+--------------------------------------+-------------+--------------------------------------+\n| 21266228-6569-4e70-90b3-d960c402bd06 | public | cafaaeb9-1ec3-418d-84a1-e72a216d40c2 |\n| 764282d1-88ad-48a1-b206-80bbea72a34f | private | 5f29e9b6-fb0d-49ec-a852-10703b72f435 |\n| c5b872be-d2ba-442e-a269-4d5f635276f6 | lb-mgmt-net | e15f0fbd-6280-4f2c-94eb-6ca073804adc |\n+--------------------------------------+-------------+--------------------------------------+", "stdout_lines": ["| 49c290e88b114b27a4cd92eb86ab6b9c | regionOne | neutron | network | True | public | https://neutron-public-openstack.apps-crc.testing |", "| d63a9570175c438bb7045d6f343cffc7 | regionOne | neutron | 
network | True | internal | https://neutron-internal.openstack.svc:9696 |", "+--------------------------------------+-------------+--------------------------------------+", "| ID | Name | Subnets |", "+--------------------------------------+-------------+--------------------------------------+", "| 21266228-6569-4e70-90b3-d960c402bd06 | public | cafaaeb9-1ec3-418d-84a1-e72a216d40c2 |", "| 764282d1-88ad-48a1-b206-80bbea72a34f | private | 5f29e9b6-fb0d-49ec-a852-10703b72f435 |", "| c5b872be-d2ba-442e-a269-4d5f635276f6 | lb-mgmt-net | e15f0fbd-6280-4f2c-94eb-6ca073804adc |", "+--------------------------------------+-------------+--------------------------------------+"]} TASK [swift_adoption : Add swift.conf secret] ********************************** changed: [localhost] => {"changed": true, "cmd": "set -euxo pipefail\n\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\noc apply -f - < {"changed": true, "cmd": "set -euxo pipefail\n\n\nCONTROLLER1_SSH=\"ssh -i /home/zuul/.ssh/id_rsa root@192.168.122.100\"\noc apply -f - < {"changed": true, "cmd": "set -euxo pipefail\n\n\noc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n swift:\n enabled: true\n template:\n memcachedInstance: memcached\n swiftRing:\n ringReplicas: 1\n swiftStorage:\n replicas: 0\n networkAttachments:\n - storage\n storageClass: crc-csi-hostpath-provisioner\n storageRequest: 10Gi\n swiftProxy:\n secret: osp-secret\n replicas: 1\n passwordSelectors:\n service: SwiftPassword\n serviceUser: swift\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n networkAttachments:\n - storage\n \n'\n", "delta": "0:00:00.107938", "end": "2025-10-06 15:11:55.199005", "msg": "non-zero return code", "rc": 1, "start": "2025-10-06 15:11:55.091067", "stderr": "+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:\n swift:\n enabled: true\n template:\n memcachedInstance: memcached\n swiftRing:\n ringReplicas: 1\n swiftStorage:\n replicas: 0\n networkAttachments:\n - storage\n storageClass: crc-csi-hostpath-provisioner\n storageRequest: 10Gi\n swiftProxy:\n secret: osp-secret\n replicas: 1\n passwordSelectors:\n service: SwiftPassword\n serviceUser: swift\n override:\n service:\n internal:\n metadata:\n annotations:\n metallb.universe.tf/address-pool: internalapi\n metallb.universe.tf/allow-shared-ip: internalapi\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\n spec:\n type: LoadBalancer\n networkAttachments:\n - storage\n \n'\nerror: unable to parse \"spec:\\n swift:\\n enabled: true\\n template:\\n memcachedInstance: memcached\\n swiftRing:\\n ringReplicas: 1\\n swiftStorage:\\n replicas: 0\\n networkAttachments:\\n - storage\\n storageClass: crc-csi-hostpath-provisioner\\n storageRequest: 10Gi\\n swiftProxy:\\n secret: osp-secret\\n replicas: 1\\n passwordSelectors:\\n service: SwiftPassword\\n serviceUser: swift\\n override:\\n service:\\n internal:\\n metadata:\\n annotations:\\n metallb.universe.tf/address-pool: internalapi\\n metallb.universe.tf/allow-shared-ip: internalapi\\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\\n spec:\\n type: LoadBalancer\\n networkAttachments:\\n - storage\\n \\n\": yaml: line 12: mapping values are not allowed in this context", "stderr_lines": ["+ oc patch openstackcontrolplane openstack --type=merge --patch 'spec:", " swift:", " enabled: true", " template:", 
" memcachedInstance: memcached", " swiftRing:", " ringReplicas: 1", " swiftStorage:", " replicas: 0", " networkAttachments:", " - storage", " storageClass: crc-csi-hostpath-provisioner", " storageRequest: 10Gi", " swiftProxy:", " secret: osp-secret", " replicas: 1", " passwordSelectors:", " service: SwiftPassword", " serviceUser: swift", " override:", " service:", " internal:", " metadata:", " annotations:", " metallb.universe.tf/address-pool: internalapi", " metallb.universe.tf/allow-shared-ip: internalapi", " metallb.universe.tf/loadBalancerIPs: 172.17.0.80", " spec:", " type: LoadBalancer", " networkAttachments:", " - storage", " ", "'", "error: unable to parse \"spec:\\n swift:\\n enabled: true\\n template:\\n memcachedInstance: memcached\\n swiftRing:\\n ringReplicas: 1\\n swiftStorage:\\n replicas: 0\\n networkAttachments:\\n - storage\\n storageClass: crc-csi-hostpath-provisioner\\n storageRequest: 10Gi\\n swiftProxy:\\n secret: osp-secret\\n replicas: 1\\n passwordSelectors:\\n service: SwiftPassword\\n serviceUser: swift\\n override:\\n service:\\n internal:\\n metadata:\\n annotations:\\n metallb.universe.tf/address-pool: internalapi\\n metallb.universe.tf/allow-shared-ip: internalapi\\n metallb.universe.tf/loadBalancerIPs: 172.17.0.80\\n spec:\\n type: LoadBalancer\\n networkAttachments:\\n - storage\\n \\n\": yaml: line 12: mapping values are not allowed in this context"], "stdout": "", "stdout_lines": []} PLAY RECAP ********************************************************************* localhost : ok=74 changed=67 unreachable=0 failed=1 skipped=36 rescued=0 ignored=0