We saw the following on the test environment when the tests failed:
root@node-2:~# haproxy-status.sh | grep DOWN
object-storage node-1 Status: DOWN/L4TOUT Sessions: 0 Rate: 0
object-storage node-3 Status: DOWN/L4TOUT Sessions: 0 Rate: 0
root@node-3:~# ceph status
    cluster 3306b9f1-7582-4079-b1e8-18de068de956
     health HEALTH_WARN
            too many PGs per OSD (352 > max 300)
     monmap e3: 3 mons at {node-1=10.109.7.3:6789/0,node-2=10.109.7.2:6789/0,node-3=10.109.7.1:6789/0}
            election epoch 18, quorum 0,1,2 node-3,node-2,node-1
     osdmap e246: 4 osds: 4 up, 4 in
      pgmap v4960: 704 pgs, 10 pools, 172 MB data, 98 objects
            8709 MB used, 188 GB / 197 GB avail
                 704 active+clean