/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h"

struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort);

static inline bool is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}

bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = true;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.disk != D_DISKLESS ||
		    mdev->state.conn != C_STANDALONE ||
		    mdev->state.role != R_SECONDARY) {
			rv = false;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

/* Unfortunately the states were not ordered correctly when they were
   defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}

static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}

enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		role = max_role(role, mdev->state.role);
	rcu_read_unlock();

	return role;
}

enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		peer = max_role(peer, mdev->state.peer);
	rcu_read_unlock();

	return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
	rcu_read_unlock();

	return ds;
}

enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_MASK;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
	rcu_read_unlock();

	return ds;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
	rcu_read_unlock();

	return ds;
}

enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
{
	enum drbd_conns conn = C_MASK;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		conn = min_t(enum drbd_conns, conn, mdev->state.conn);
	rcu_read_unlock();

	return conn;
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}

static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}
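
/*
 * Illustrative sketch (not part of the driver): mask selects which
 * bits to replace, val supplies their new contents.  E.g. replacing
 * only the connection field while leaving role/disk bits untouched:
 *
 *	union drbd_state mask = { { .conn = C_MASK } };
 *	union drbd_state val  = { { .conn = C_UNCONNECTED } };
 *	ns = apply_mask_val(os, mask, val);
 *	now ns.conn == C_UNCONNECTED, all other fields keep os' values
 */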

enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	ns = apply_mask_val(drbd_read_state(mdev), mask, val);
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}
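
/*
 * Illustrative sketch (not part of the driver): a caller that just
 * learned its backing device is gone can impose that fact with
 * CS_HARD semantics, bypassing the soft-transition checks:
 *
 *	drbd_force_state(mdev, NS(disk, D_FAILED));
 */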

static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = drbd_read_state(mdev);
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(mdev->state_mutex);

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = drbd_read_state(mdev);
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns);
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(mdev, mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		ns = apply_mask_val(drbd_read_state(mdev), mask, val);
		rv = _drbd_set_state(mdev, ns, f, &done);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->tconn->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(mdev->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
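
/*
 * Illustrative sketch (not part of the driver): a typical caller
 * changes a single field via the NS() helper and lets the wrapper
 * above retry while the state is transient, e.g. as abw_start_sync()
 * does further below:
 *
 *	_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
 */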

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
		  union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}

static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp;
	pbp = pb;
	*pbp = 0;

	if (ns.role != os.role && flags & CS_DC_ROLE)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer && flags & CS_DC_PEER)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && flags & CS_DC_CONN)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk && flags & CS_DC_DISK)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));

	return pbp - pb;
}

static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);

	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);

	if (pbp != pb)
		dev_info(DEV, "%s\n", pb);
}

static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags);

	if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));

	if (pbp != pb)
		conn_info(tconn, "%s\n", pb);
}

/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;
	struct net_conf *nc;

	rcu_read_lock();
	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
		put_ldev(mdev);
	}

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (!nc->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (nc->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->tconn->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	rcu_read_unlock();

	return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
	   rv = SS_IN_TRANSIENT_STATE; */

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	/* no change -> nothing to do, at least for the connection part */
	if (oc == nc)
		return SS_NOTHING_TO_DO;

	/* disconnect of an unconfigured connection does not make sense */
	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
		return SS_ALREADY_STANDALONE;

	/* from C_STANDALONE, we start with C_UNCONNECTED */
	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
		return SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		return SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		return SS_IN_TRANSIENT_STATE;

	return SS_SUCCESS;
}
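
/*
 * Illustrative sketch (not part of the driver): the checks above
 * encode the legal connection sequence, e.g. a configured connection
 * must pass through C_DISCONNECTING on its way back to C_STANDALONE:
 *
 *	is_valid_conn_transition(C_STANDALONE, C_UNCONNECTED)	-> SS_SUCCESS
 *	is_valid_conn_transition(C_STANDALONE, C_DISCONNECTING)	-> SS_ALREADY_STANDALONE
 *	is_valid_conn_transition(C_DISCONNECTING, C_STANDALONE)	-> SS_SUCCESS
 */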

/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts that are
 * imposed on DRBD by the environment, e.g. the disk broke or the network broke down.
 * But even those hard state transitions are not allowed to do everything.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING) {
		printk("TODO: FIX ME\n");
		rv = SS_IS_DISKLESS;
	}

	return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @warn_sync_abort: gets set to "Resync" or "Online-verify" if an ongoing sync had to be aborted.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		rcu_read_lock();
		fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no data is accessible */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
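
/*
 * Illustrative sketch (not part of the driver): the clamping above
 * silently repairs combinations that make no sense.  E.g. a sync
 * target cannot have an up-to-date disk, so with the usual ordering
 * of enum drbd_disk_state (D_UP_TO_DATE above D_INCONSISTENT):
 *
 *	ns.conn = C_SYNC_TARGET;  ns.disk = D_UP_TO_DATE;
 *	ns = sanitize_state(mdev, ns, NULL);
 *	now ns.disk == D_INCONSISTENT (disk_max for a sync target)
 */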

void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->tconn->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion that will be completed after after_state_ch() has finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		 enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = drbd_read_state(mdev);

	ns = sanitize_state(mdev, ns, &warn_sync_abort);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks ; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_soft_transition(os, ns);
		} else
			rv = is_valid_soft_transition(os, ns);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	drbd_pr_state_change(mdev, os, ns, flags);

	/* Display changes to the susp* flags that were caused by the call to
	   sanitize_state(). Only display it here if we were not called from
	   _conn_request_state() */
	if (!(flags & CS_DC_SUSP))
		conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state.i = ns.i;
	mdev->tconn->susp = ns.susp;
	mdev->tconn->susp_nod = ns.susp_nod;
	mdev->tconn->susp_fen = ns.susp_fen;

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);
	wake_up(&mdev->tconn->ping_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->tconn->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->w.mdev = mdev;
		ascw->done = done;
		drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
	} else {
		dev_err(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_conf *mdev = w->mdev;

	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 0;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
			       int (*io_fn)(struct drbd_conf *),
			       char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->tconn->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	struct sib_info sib;

	sib.sib_reason = SIB_STATE_CHANGE;
	sib.os = os;
	sib.ns = ns;

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		rcu_read_lock();
		fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_event(mdev, &sib);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (ns.susp_nod) {
		enum drbd_req_event what = NOTHING;

		if (os.conn < C_CONNECTED && conn_lowest_conn(mdev->tconn) >= C_CONNECTED)
			what = RESEND;

		if (os.disk == D_ATTACHING && conn_lowest_disk(mdev->tconn) > D_ATTACHING)
			what = RESTART_FROZEN_DISK_IO;

		if (what != NOTHING) {
			spin_lock_irq(&mdev->tconn->req_lock);
			_tl_restart(mdev->tconn, what);
			_drbd_set_state(_NS(mdev, susp_nod, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (drbd_suspended(mdev)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
		mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev);

	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(mdev,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* We are invalidating ourselves... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		/* other bitmap operation expected during this phase */
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
			"set_n_write from invalidate", BM_LOCKED_MASK);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;
		int was_io_error;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS,
		 * so it is safe to dereference ldev here. */
		rcu_read_lock();
		eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
		rcu_read_unlock();
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		if (mdev->state.disk != D_FAILED)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s during detach\n",
				drbd_disk_str(mdev->state.disk));

		if (!drbd_send_state(mdev))
			dev_info(DEV, "Notified peer that I am detaching my disk\n");

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */
		drbd_md_sync(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		if (!drbd_send_state(mdev))
			dev_info(DEV, "Notified peer that I'm now diskless.\n");
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
		drbd_send_state(mdev);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase.
	 */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
			"write from resync_finished", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
	}

	drbd_md_sync(mdev);
}

struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state ns_min;
	union drbd_state ns_max; /* new, max state, over all mdevs */
	enum chg_state_flags flags;
};

static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_tconn *tconn = w->tconn;
	enum drbd_conns oc = acscw->oc;
	union drbd_state ns_max = acscw->ns_max;
	union drbd_state ns_min = acscw->ns_min;
	struct drbd_conf *mdev;
	int vnr;

	kfree(acscw);

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
		struct net_conf *old_conf;

		mutex_lock(&tconn->conf_update);
		old_conf = tconn->net_conf;
		tconn->my_addr_len = 0;
		tconn->peer_addr_len = 0;
		rcu_assign_pointer(tconn->net_conf, NULL);
		conn_free_crypto(tconn);
		mutex_unlock(&tconn->conf_update);

		synchronize_rcu();
		kfree(old_conf);
	}

	if (ns_max.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (ns_max.pdsk <= D_OUTDATED) {
			tl_clear(tconn);
			rcu_read_lock();
			idr_for_each_entry(&tconn->volumes, mdev, vnr) {
				if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
					drbd_uuid_new_current(mdev);
					clear_bit(NEW_CUR_UUID, &mdev->flags);
				}
			}
			rcu_read_unlock();
			conn_request_state(tconn,
					   (union drbd_state) { { .susp_fen = 1 } },
					   (union drbd_state) { { .susp_fen = 0 } },
					   CS_VERBOSE);
		}
		/* case2: The connection was established again: */
		if (ns_min.conn >= C_CONNECTED) {
			rcu_read_lock();
			idr_for_each_entry(&tconn->volumes, mdev, vnr)
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			rcu_read_unlock();
			spin_lock_irq(&tconn->req_lock);
			_tl_restart(tconn, RESEND);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
			spin_unlock_irq(&tconn->req_lock);
		}
	}
	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
{
	enum chg_state_flags flags = ~0;
	union drbd_dev_state os, cs = {}; /* old_state, common_state */
	struct drbd_conf *mdev;
	int vnr, first_vol = 1;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;

		if (first_vol) {
			cs = os;
			first_vol = 0;
			continue;
		}

		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
	}
	rcu_read_unlock();

	*pf |= CS_DC_MASK;
	*pf &= flags;
	(*pcs).i = cs.i;
}

static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = drbd_read_state(mdev);
		ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);
		if (rv < SS_SUCCESS)
			break;

		if (!(flags & CS_HARD)) {
			rv = is_valid_state(mdev, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(mdev, os) == rv)
					rv = is_valid_soft_transition(os, ns);
			} else
				rv = is_valid_soft_transition(os, ns);
		}
		if (rv < SS_SUCCESS)
			break;
	}
	rcu_read_unlock();

	if (rv < SS_SUCCESS && flags & CS_VERBOSE)
		print_st_err(mdev, os, ns, rv);

	return rv;
}

void
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
	union drbd_state ns, os, ns_max = { };
	union drbd_state ns_min = {
		{ .role = R_MASK,
		  .peer = R_MASK,
		  .disk = D_MASK,
		  .pdsk = D_MASK
		} };
	struct drbd_conf *mdev;
	enum drbd_state_rv rv;
	int vnr;

	if (mask.conn == C_MASK)
		tconn->cstate = val.conn;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = drbd_read_state(mdev);
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(mdev, ns, NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		rv = __drbd_set_state(mdev, ns, flags, NULL);
		if (rv < SS_SUCCESS)
			BUG();

		ns.i = mdev->state.i;
		ns_max.role = max_role(ns.role, ns_max.role);
		ns_max.peer = max_role(ns.peer, ns_max.peer);
		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);

		ns_min.role = min_role(ns.role, ns_min.role);
		ns_min.peer = min_role(ns.peer, ns_min.peer);
		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
	}
	rcu_read_unlock();

	ns_min.susp = ns_max.susp = tconn->susp;
	ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
	ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;

	*pns_min = ns_min;
	*pns_max = ns_max;
}

static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irq(&tconn->req_lock);
	rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;

	if (rv == SS_UNKNOWN_ERROR)
		rv = conn_is_valid_transition(tconn, mask, val, 0);

	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */

	spin_unlock_irq(&tconn->req_lock);

	return rv;
}

static enum drbd_state_rv
conn_cl_wide(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	     enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	spin_unlock_irq(&tconn->req_lock);
	mutex_lock(&tconn->cstate_mutex);

	if (conn_send_state_req(tconn, mask, val)) {
		rv = SS_CW_FAILED_BY_PEER;
		/* if (f & CS_VERBOSE)
		   print_st_err(mdev, os, ns, rv); */
		goto abort;
	}

	wait_event(tconn->ping_wait, (rv = _conn_rq_cond(tconn, mask, val)));

abort:
	mutex_unlock(&tconn->cstate_mutex);
	spin_lock_irq(&tconn->req_lock);

	return rv;
}

enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = tconn->cstate;
	union drbd_state ns_max, ns_min, os;

	rv = is_valid_conn_transition(oc, val.conn);
	if (rv < SS_SUCCESS)
		goto abort;

	rv = conn_is_valid_transition(tconn, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
		rv = conn_cl_wide(tconn, mask, val, flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	conn_old_common_state(tconn, &os, &flags);
	flags |= CS_DC_SUSP;
	conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
	conn_pr_state_change(tconn, os, ns_max, flags);

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->ns_min = ns_min;
		acscw->ns_max = ns_max;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		kref_get(&tconn->kref);
		acscw->w.tconn = tconn;
		drbd_queue_work(&tconn->data.work, &acscw->w);
	} else {
		conn_err(tconn, "Could not kmalloc an acscw\n");
	}

abort:
	return rv;
}

enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&tconn->req_lock);
	rv = _conn_request_state(tconn, mask, val, flags);
	spin_unlock_irq(&tconn->req_lock);

	return rv;
}