# qa/suites/upgrade/reef-x/stress-split/1-start.yaml
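# reef-x/stress-split: bootstrap a Reef cluster with cephadm, upgrade the
# first half (mgrs, mons, 4 of 8 OSDs) while stress workloads run, thrash the
# resulting mixed-version cluster, then upgrade the remainder and verify that
# the whole cluster reports a single version.
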
overrides:
  ceph:
    log-ignorelist:
      - do not have an application enabled
      - application not enabled
      - or freeform for custom applications
      - POOL_APP_NOT_ENABLED
      - is down
      - OSD_DOWN
      - mons down
      - mon down
      - MON_DOWN
      - out of quorum
      - PG_AVAILABILITY
      - PG_DEGRADED
      - Reduced data availability
      - Degraded data redundancy
      - pg .* is stuck inactive
      - pg .* is .*degraded
      - FS_DEGRADED
      - OSDMAP_FLAGS
      - OSD_UPGRADE_FINISHED
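# (the ignorelist entries above are patterns matched against the cluster log;
# warnings such as MON_DOWN or PG_DEGRADED are expected while daemons restart
# during the staggered upgrade and should not fail the run)
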
tasks:
- install:
    branch: reef
    exclude_packages:
      - ceph-volume
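
# the install task above lays down reef packages on the nodes before cephadm
# takes over; ceph-volume is excluded, presumably because the containerized
# deployment uses the copy shipped in the image rather than the distro package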

- cephadm:
    image: quay.ceph.io/ceph-ci/ceph:reef
    compiled_cephadm_branch: reef
    conf:
      osd:
        # set config options for which cls modules are allowed to be loaded / used
        osd_class_load_list: "*"
        osd_class_default_list: "*"
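
# a note on the cephadm task above: bootstrap uses the reef ceph-ci image
# together with a cephadm binary built from the reef branch
# (compiled_cephadm_branch); the wildcard class lists let OSDs load and use
# any cls module the test workloads may need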

- cephadm.shell:
    mon.a:
      - ceph fs volume create foo
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
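
# the shell step above creates a CephFS volume for the suite's fs workloads and
# silences the insecure global_id reclaim health warnings, which older clients
# can trigger during an upgrade; --force is passed, presumably so the options
# apply even if the bootstrap build does not recognize them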

- ceph.healthy:

- print: "**** upgrading first half of cluster, with stress ****"
- parallel:
    - first-half-tasks
    - first-half-sequence
- print: "**** done upgrading first half of cluster ****"

- ceph.healthy:

- print: "**** applying stress + thrashing to mixed-version cluster ****"

- parallel:
    - stress-tasks

- ceph.healthy:

- print: "**** finishing upgrade ****"
- parallel:
    - second-half-tasks
    - second-half-sequence

- ceph.healthy:
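
# the named sections below are consumed by the `parallel` tasks above.
# first-half-tasks and second-half-tasks (the client workloads run alongside
# each upgrade phase) are presumably defined in sibling yaml fragments of this
# suite; the upgrade sequences and the thrashing block live in this file.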


#################

first-half-sequence:
- cephadm.shell:
    env: [sha1]
    mon.a:
      - ceph config set mgr mgr/cephadm/daemon_cache_timeout 60
      - ceph config set global log_to_journald false --force
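
      # the two settings above shorten cephadm's daemon cache refresh so
      # upgrade progress is noticed quickly, and steer daemon logs away from
      # journald, presumably so they land in files teuthology can collect;
      # $sha1 comes from `env: [sha1]` and names the ceph-ci build under test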

      - echo wait for mgr daemons to upgrade
      # upgrade the mgr daemons first
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
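      # note: each wait loop in this file polls `ceph orch upgrade status`
      # every 30 seconds, dumping daemon and version state for the logs, and
      # exits once the upgrade is no longer in progress or the status message
      # reports an error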

      - echo wait for minority of mons to upgrade
      # upgrade 1 of 3 mon daemons, then wait 60 seconds
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --limit 1
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - sleep 60
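      # (the sleep presumably gives the upgraded mon time to rejoin quorum
      # before the next step restarts another one)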

      - echo wait for majority of mons to upgrade
      # upgrade one more mon daemon (to get us to 2/3 upgraded) and wait 60 seconds
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --limit 1
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - sleep 60

      - echo wait for all mons to upgrade
      # upgrade final mon daemon and wait 60 seconds
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - sleep 60

      - echo wait for half of osds to upgrade
      # upgrade 4 of the 8 OSDs
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 4
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
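
      # at this point all mgrs and mons plus 4 of the 8 OSDs run the target
      # version: the mixed-version state that stress-tasks below thrashes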


#################

stress-tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgpnum_fix: 1
    chance_thrash_cluster_full: 0
    chance_thrash_pg_upmap: 0
    chance_thrash_pg_upmap_items: 0
    disable_objectstore_tool_tests: true
    chance_force_recovery: 0
    aggressive_pg_num_changes: false
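
# thrashosds randomly marks OSDs down/out and restarts them while client I/O
# runs; upmap, cluster-full and forced-recovery thrashing and the objectstore
# tool tests are disabled here, presumably because they are unsafe or not
# meaningful against a mixed-version OSD set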


#################

second-half-sequence:
  sequential:
    - cephadm.shell:
        env: [sha1]
        mon.a:
          - sleep 60

          - echo wait for upgrade to complete
          # upgrade whatever is left
          - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
          - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done

          - echo upgrade complete
          - ceph orch ps
          - ceph versions
          - ceph versions | jq -e '.overall | length == 1'
          - ceph versions | jq -e '.overall | keys' | grep $sha1
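          # `length == 1` asserts every daemon reports one and the same
          # version, and the keys grep asserts it is the $sha1 build under test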