/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h"
extern void tl_abort_disk_io(struct drbd_conf *mdev);
struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};
enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       enum sanitize_state_warnings *warn);
static inline bool is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}
bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = true;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.disk != D_DISKLESS ||
		    mdev->state.conn != C_STANDALONE ||
		    mdev->state.role != R_SECONDARY) {
			rv = false;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
/* Unfortunately the states were not correctly ordered when they were
   defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}
static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}
enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		role = max_role(role, mdev->state.role);
	rcu_read_unlock();

	return role;
}
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		peer = max_role(peer, mdev->state.peer);
	rcu_read_unlock();

	return peer;
}
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
	rcu_read_unlock();

	return ds;
}
enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_MASK;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
	rcu_read_unlock();

	return ds;
}
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
	rcu_read_unlock();

	return ds;
}
enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
{
	enum drbd_conns conn = C_MASK;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		conn = min_t(enum drbd_conns, conn, mdev->state.conn);
	rcu_read_unlock();

	return conn;
}
static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;
	bool rv = true;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		if (mdev->state.conn == C_WF_REPORT_PARAMS) {
			rv = false;
			break;
		}
	rcu_read_unlock();

	return rv;
}
/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
		(os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
}
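/*
 * Example: a promotion while connected (os.conn and ns.conn both
 * C_CONNECTED, os.role != R_PRIMARY, ns.role == R_PRIMARY) matches the
 * first clause above, so the peer must acknowledge the change before
 * it is committed locally.
 */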
static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}
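/*
 * Example: with mask.conn == C_MASK (all bits of the .conn field) and
 * val.conn == C_UNCONNECTED, only the .conn field of os is replaced;
 * (os.i & ~mask.i) preserves every other state bit unchanged.
 */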
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	ns = apply_mask_val(drbd_read_state(mdev), mask, val);
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}
/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}
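/*
 * Illustrative call (a sketch; NS() is the helper macro that builds the
 * single-field (mask, val) argument pair): when the receiver detects a
 * broken socket, it imposes the fact on the state machine with
 *
 *	drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
 *
 * CS_HARD means the usual soft-transition vetoes do not apply.
 */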
static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = drbd_read_state(mdev);
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv >= SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* cont waiting, otherwise fail. */

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(mdev, ns);
		if (rv >= SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns, mdev->tconn);
			if (rv >= SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}
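/* Note that SS_UNKNOWN_ERROR is 0: returning it keeps the wait_event()
 * in drbd_req_state() sleeping, while any other value ends the wait. */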
/**
 * drbd_req_state() - Perform a possibly cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(mdev->state_mutex);

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = drbd_read_state(mdev);
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns, mdev->tconn);
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(mdev, mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		ns = apply_mask_val(drbd_read_state(mdev), mask, val);
		rv = _drbd_set_state(mdev, ns, f, &done);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->tconn->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(mdev->state_mutex);

	return rv;
}
/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
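/*
 * Illustrative call (a sketch using the NS() helper macro): a soft request
 * to outdate the local disk, retried by the wait_event() above for as long
 * as drbd_req_state() answers SS_IN_TRANSIENT_STATE:
 *
 *	rv = _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_VERBOSE);
 */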
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}
void print_st_err(struct drbd_conf *mdev, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}
static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp;
	pbp = pb;
	*pbp = 0;

	if (ns.role != os.role && flags & CS_DC_ROLE)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer && flags & CS_DC_PEER)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && flags & CS_DC_CONN)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk && flags & CS_DC_DISK)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));

	return pbp - pb;
}
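/*
 * With CS_DC_ROLE and CS_DC_DISK set, a typical promotion renders as
 *	role( Secondary -> Primary ) disk( Inconsistent -> UpToDate )
 * in the buffer handed in by the callers below.
 */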
static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);

	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);

	if (pbp != pb)
		dev_info(DEV, "%s\n", pb);
}
static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags);

	if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));

	if (pbp != pb)
		conn_info(tconn, "%s\n", pb);
}
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;
	struct net_conf *nc;

	rcu_read_lock();
	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
		put_ldev(mdev);
	}

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (!nc->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (nc->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->tconn->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	rcu_read_unlock();

	return rv;
}
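/*
 * Example: without net_conf->two_primaries, a request that would end with
 * ns.role == R_PRIMARY while ns.peer is already R_PRIMARY is answered with
 * SS_TWO_PRIMARIES and never committed.
 */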
/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD, i.e.
 * user requests (aka soft transitions).
 * @os:		old state.
 * @ns:		new state.
 * @tconn:	DRBD connection.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
	   rv = SS_IN_TRANSIENT_STATE; */

	/* While establishing a connection only allow cstate to change.
	   Delay/refuse role changes, detach attach etc... */
	if (test_bit(STATE_SENT, &tconn->flags) &&
	    !(os.conn == C_WF_REPORT_PARAMS ||
	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}
static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	/* no change -> nothing to do, at least for the connection part */
	if (oc == nc)
		return SS_NOTHING_TO_DO;

	/* disconnect of an unconfigured connection does not make sense */
	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
		return SS_ALREADY_STANDALONE;

	/* from C_STANDALONE, we start with C_UNCONNECTED */
	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
		return SS_NEED_CONNECTION;

	/* When establishing a connection we need to go through C_WF_REPORT_PARAMS!
	   Necessary to do the right thing upon invalidate-remote on a disconnected resource */
	if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
		return SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		return SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		return SS_IN_TRANSIENT_STATE;

	return SS_SUCCESS;
}
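/*
 * The checks above enforce the legal walk, e.g.
 *	C_STANDALONE -> C_UNCONNECTED -> C_WF_CONNECTION ->
 *	C_WF_REPORT_PARAMS -> C_CONNECTED -> C_DISCONNECTING -> C_STANDALONE
 * anything that skips a mandatory hop is refused.
 */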
/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts that are
 * imposed on DRBD by the environment, e.g. disk broke or network broke down.
 * But those hard state transitions are still not allowed to do everything.
 * @os:		old state.
 * @ns:		new state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	return rv;
}
static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
{
	static const char *msg_table[] = {
		[NO_WARNING] = "",
		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
		[ABORTED_RESYNC] = "Resync aborted.",
		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
	};

	if (warn != NO_WARNING)
		dev_warn(DEV, "%s\n", msg_table[warn]);
}
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @warn:	placed warnings, if any.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       enum sanitize_state_warnings *warn)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	if (warn)
		*warn = NO_WARNING;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		rcu_read_lock();
		fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn)
			*warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			if (warn)
				*warn = CONNECTION_LOST_NEGOTIATING;
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_DISK;
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_PDSK;
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
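/*
 * Worked example: if an online verify is running (ns.conn == C_VERIFY_S)
 * and the peer's disk fails (ns.pdsk <= D_FAILED), the code above pulls
 * ns.conn back to C_CONNECTED and reports ABORTED_ONLINE_VERIFY via *warn.
 */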
void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->tconn->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
	         enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	enum sanitize_state_warnings ssw;
	struct after_state_chg_work *ascw;
	bool did_remote, should_do_remote;

	os = drbd_read_state(mdev);

	ns = sanitize_state(mdev, ns, &ssw);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen... */

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_soft_transition(os, ns, mdev->tconn);
		} else
			rv = is_valid_soft_transition(os, ns, mdev->tconn);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	print_sanitize_warnings(mdev, ssw);

	drbd_pr_state_change(mdev, os, ns, flags);

	/* Display changes to the susp* flags that were caused by the call to
	   sanitize_state(). Only display it here if we were not called from
	   _conn_request_state() */
	if (!(flags & CS_DC_SUSP))
		conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	did_remote = drbd_should_do_remote(mdev->state);
	mdev->state.i = ns.i;
	should_do_remote = drbd_should_do_remote(mdev->state);
	mdev->tconn->susp = ns.susp;
	mdev->tconn->susp_nod = ns.susp_nod;
	mdev->tconn->susp_fen = ns.susp_fen;

	/* put replicated vs not-replicated requests in separate epochs */
	if (did_remote != should_do_remote)
		start_new_tl_epoch(mdev->tconn);

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	/* Wake up role changes that were delayed while the connection
	   was being established */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
	    no_peer_wf_report_params(mdev->tconn))
		clear_bit(STATE_SENT, &mdev->tconn->flags);

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);
	wake_up(&mdev->tconn->ping_wait);

	/* Aborted verify run, or we reached the stop sector.
	 * Log the last position, unless end-of-device. */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn <= C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		if (mdev->ov_left)
			dev_info(DEV, "Online Verify reached sector %llu\n",
				(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		mdf &= ~MDF_AL_CLEAN;
		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_WF_CONNECTION &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->tconn->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	/* remember last attach time so request_timer_fn() won't
	 * kill newly established sessions while we are still trying to thaw
	 * previously frozen IO */
	if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
	    ns.disk > D_NEGOTIATING)
		mdev->last_reattach_jif = jiffies;

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->w.mdev = mdev;
		ascw->done = done;
		drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
	} else {
		dev_err(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}
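/* All sleeping follow-up work runs from the worker thread through the ascw
 * queued above; __drbd_set_state() itself is called with the req_lock held
 * and never sleeps, hence also the GFP_ATOMIC allocation. */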
static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_conf *mdev = w->mdev;

	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 0;
}
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}
int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->tconn->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}
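/*
 * Example (mirroring the demote case in after_state_ch() below): write the
 * whole bitmap from the worker, asserting that no bits change meanwhile:
 *
 *	drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
 *			"demote", BM_LOCKED_TEST_ALLOWED);
 */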
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	struct sib_info sib;

	sib.sib_reason = SIB_STATE_CHANGE;
	sib.os = os;
	sib.ns = ns;

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	/* Inform userspace about the change... */
	drbd_bcast_event(mdev, &sib);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (ns.susp_nod) {
		struct drbd_tconn *tconn = mdev->tconn;
		enum drbd_req_event what = NOTHING;

		spin_lock_irq(&tconn->req_lock);
		if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
			what = RESEND;

		if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
		    conn_lowest_disk(tconn) > D_NEGOTIATING)
			what = RESTART_FROZEN_DISK_IO;

		if (tconn->susp_nod && what != NOTHING) {
			_tl_restart(tconn, what);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_nod = 1 } },
					    (union drbd_state) { { .susp_nod = 0 } },
					    CS_VERBOSE);
		}
		spin_unlock_irq(&tconn->req_lock);
	}

	if (ns.susp_fen) {
		struct drbd_tconn *tconn = mdev->tconn;

		spin_lock_irq(&tconn->req_lock);
		if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
			/* case2: The connection was established again: */
			struct drbd_conf *odev;
			int vnr;

			rcu_read_lock();
			idr_for_each_entry(&tconn->volumes, odev, vnr)
				clear_bit(NEW_CUR_UUID, &odev->flags);
			rcu_read_unlock();
			_tl_restart(tconn, RESEND);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
		}
		spin_unlock_irq(&tconn->req_lock);
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS &&
	    ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) {      /* attach on the peer */
		/* we probably will start a resync soon.
		 * make sure those things are properly reset. */
		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);
		drbd_rs_cancel_all(mdev);

		drbd_send_uuids(mdev);
		drbd_send_state(mdev, ns);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (drbd_suspended(mdev)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
		    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}
		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
		mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev, ns);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev, ns);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev, ns);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev, ns);

	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(mdev,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* We are invalidating ourselves... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		/* other bitmap operation expected during this phase */
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
			"set_n_write from invalidate", BM_LOCKED_MASK);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh = EP_PASS_ON;
		int was_io_error = 0;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS.
		 * But it is still not safe to dereference ldev here, since
		 * we might come from a failed Attach before ldev was set. */
		if (mdev->ldev) {
			rcu_read_lock();
			eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
			rcu_read_unlock();

			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

			if (was_io_error && eh == EP_CALL_HELPER)
				drbd_khelper(mdev, "local-io-error");

			/* Immediately allow completion of all application IO,
			 * that waits for completion from the local disk,
			 * if this was a force-detach due to disk_timeout
			 * or administrator request (drbdsetup detach --force).
			 * Do NOT abort otherwise.
			 * Aborting local requests may cause serious problems,
			 * if requests are completed to upper layers already,
			 * and then later the already submitted local bio completes.
			 * This can cause DMA into former bio pages that meanwhile
			 * have been re-used for other things.
			 * So aborting local requests may cause crashes,
			 * or even worse, silent data corruption.
			 */
			if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
				tl_abort_disk_io(mdev);

			/* current state still has to be D_FAILED,
			 * there is only one way out: to D_DISKLESS,
			 * and that may only happen after our put_ldev below. */
			if (mdev->state.disk != D_FAILED)
				dev_err(DEV,
					"ASSERT FAILED: disk is %s during detach\n",
					drbd_disk_str(mdev->state.disk));

			if (ns.conn >= C_CONNECTED)
				drbd_send_state(mdev, ns);

			drbd_rs_cancel_all(mdev);

			/* In case we want to get something to stable storage still,
			 * this may be the last chance.
			 * Following put_ldev may transition to D_DISKLESS. */
			drbd_md_sync(mdev);
		}
		put_ldev(mdev);
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		if (ns.conn >= C_CONNECTED)
			drbd_send_state(mdev, ns);
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev, ns);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev, ns);

	/* Verify finished, or reached stop sector.  Peer did not know about
	 * the stop sector, and we may even have changed the stop sector during
	 * verify to interrupt/stop early.  Send the new state. */
	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
	&& verify_can_do_stop_sector(mdev))
		drbd_send_state(mdev, ns);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase.
	 */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
		put_ldev(mdev);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
	}

	drbd_md_sync(mdev);
}
struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state ns_min;
	union drbd_state ns_max; /* new, max state, over all mdevs */
	enum chg_state_flags flags;
};
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_tconn *tconn = w->tconn;
	enum drbd_conns oc = acscw->oc;
	union drbd_state ns_max = acscw->ns_max;
	struct drbd_conf *mdev;
	int vnr;

	kfree(acscw);

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
		struct net_conf *old_conf;

		mutex_lock(&tconn->conf_update);
		old_conf = tconn->net_conf;
		tconn->my_addr_len = 0;
		tconn->peer_addr_len = 0;
		rcu_assign_pointer(tconn->net_conf, NULL);
		conn_free_crypto(tconn);
		mutex_unlock(&tconn->conf_update);

		synchronize_rcu();
		kfree(old_conf);
	}

	if (ns_max.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (ns_max.pdsk <= D_OUTDATED) {
			rcu_read_lock();
			idr_for_each_entry(&tconn->volumes, mdev, vnr) {
				if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
					drbd_uuid_new_current(mdev);
					clear_bit(NEW_CUR_UUID, &mdev->flags);
				}
			}
			rcu_read_unlock();
			spin_lock_irq(&tconn->req_lock);
			_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
			spin_unlock_irq(&tconn->req_lock);
		}
	}
	kref_put(&tconn->kref, &conn_destroy);

	conn_md_sync(tconn);

	return 0;
}
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
{
	enum chg_state_flags flags = ~0;
	struct drbd_conf *mdev;
	int vnr, first_vol = 1;
	union drbd_dev_state os, cs = {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = tconn->cstate,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;

		if (first_vol) {
			cs = os;
			first_vol = 0;
			continue;
		}

		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
	}
	rcu_read_unlock();

	*pf |= CS_DC_MASK;
	*pf &= flags;
	(*pcs).i = cs.i;
}
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = drbd_read_state(mdev);
		ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);
		if (rv < SS_SUCCESS)
			break;

		if (!(flags & CS_HARD)) {
			rv = is_valid_state(mdev, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(mdev, os) == rv)
					rv = is_valid_soft_transition(os, ns, tconn);
			} else
				rv = is_valid_soft_transition(os, ns, tconn);
		}
		if (rv < SS_SUCCESS)
			break;
	}
	rcu_read_unlock();

	if (rv < SS_SUCCESS && flags & CS_VERBOSE)
		print_st_err(mdev, os, ns, rv);

	return rv;
}
static void
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
	union drbd_state ns, os, ns_max = { };
	union drbd_state ns_min = {
		{ .role = R_MASK,
		  .peer = R_MASK,
		  .conn = C_MASK,
		  .disk = D_MASK,
		  .pdsk = D_MASK
		} };
	struct drbd_conf *mdev;
	enum drbd_state_rv rv;
	int vnr, number_of_volumes = 0;

	if (mask.conn == C_MASK) {
		/* remember last connect time so request_timer_fn() won't
		 * kill newly established sessions while we are still trying to thaw
		 * previously frozen IO */
		if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
			tconn->last_reconnect_jif = jiffies;

		tconn->cstate = val.conn;
	}

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		number_of_volumes++;
		os = drbd_read_state(mdev);
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(mdev, ns, NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		rv = __drbd_set_state(mdev, ns, flags, NULL);
		if (rv < SS_SUCCESS)
			BUG();

		ns.i = mdev->state.i;
		ns_max.role = max_role(ns.role, ns_max.role);
		ns_max.peer = max_role(ns.peer, ns_max.peer);
		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);

		ns_min.role = min_role(ns.role, ns_min.role);
		ns_min.peer = min_role(ns.peer, ns_min.peer);
		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
	}
	rcu_read_unlock();

	if (number_of_volumes == 0) {
		ns_min = ns_max = (union drbd_state) { {
				.role = R_SECONDARY,
				.peer = R_UNKNOWN,
				.conn = val.conn,
				.disk = D_DISKLESS,
				.pdsk = D_UNKNOWN
			} };
	}

	ns_min.susp = ns_max.susp = tconn->susp;
	ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
	ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;

	*pns_min = ns_min;
	*pns_max = ns_max;
}
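/*
 * Example: with one volume Primary/UpToDate and another Secondary/Diskless,
 * conn_set_state() reports ns_max as Primary/UpToDate and ns_min as
 * Secondary/Diskless; callers thus see the range over all volumes instead
 * of a single per-volume state.
 */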
static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;

	if (rv == SS_UNKNOWN_ERROR)
		rv = conn_is_valid_transition(tconn, mask, val, 0);

	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */

	return rv;
}
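/* Like _req_st_cond() above: SS_UNKNOWN_ERROR (== 0) keeps the
 * wait_event_lock_irq() in _conn_request_state() waiting for the peer's
 * reply, every other value ends the wait. */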
enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = tconn->cstate;
	union drbd_state ns_max, ns_min, os;
	bool have_mutex = false;

	if (mask.conn) {
		rv = is_valid_conn_transition(oc, val.conn);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	rv = conn_is_valid_transition(tconn, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {

		/* This will be a cluster-wide state change.
		 * Need to give up the spinlock, grab the mutex,
		 * then send the state change request, ... */
		spin_unlock_irq(&tconn->req_lock);
		mutex_lock(&tconn->cstate_mutex);
		have_mutex = true;

		set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
		if (conn_send_state_req(tconn, mask, val)) {
			/* sending failed. */
			clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
			rv = SS_CW_FAILED_BY_PEER;
			/* need to re-acquire the spin lock, though */
			goto abort_unlocked;
		}

		if (val.conn == C_DISCONNECTING)
			set_bit(DISCONNECT_SENT, &tconn->flags);

		/* ... and re-acquire the spinlock.
		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
		 * conn_set_state() within the same spinlock. */
		spin_lock_irq(&tconn->req_lock);
		wait_event_lock_irq(tconn->ping_wait,
				(rv = _conn_rq_cond(tconn, mask, val)),
				tconn->req_lock);
		clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	conn_old_common_state(tconn, &os, &flags);
	flags |= CS_DC_SUSP;
	conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
	conn_pr_state_change(tconn, os, ns_max, flags);

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->ns_min = ns_min;
		acscw->ns_max = ns_max;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		kref_get(&tconn->kref);
		acscw->w.tconn = tconn;
		drbd_queue_work(&tconn->sender_work, &acscw->w);
	} else {
		conn_err(tconn, "Could not kmalloc an acscw\n");
	}

 abort:
	if (have_mutex) {
		/* mutex_unlock() "... must not be used in interrupt context.",
		 * so give up the spinlock, then re-acquire it */
		spin_unlock_irq(&tconn->req_lock);
 abort_unlocked:
		mutex_unlock(&tconn->cstate_mutex);
		spin_lock_irq(&tconn->req_lock);
	}
	if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
		conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
		conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
		conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
	}
	return rv;
}
enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&tconn->req_lock);
	rv = _conn_request_state(tconn, mask, val, flags);
	spin_unlock_irq(&tconn->req_lock);

	return rv;
}