4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
28 #include <linux/drbd_limits.h>
32 struct after_state_chg_work
{
36 enum chg_state_flags flags
;
37 struct completion
*done
;
/* Warnings that sanitize_state() may report back to its caller; the caller
 * decides whether/how to log them (see print_sanitize_warnings()). */
enum sanitize_state_warnings {
	NO_WARNING = -1,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};
49 static int w_after_state_ch(struct drbd_work
*w
, int unused
);
50 static void after_state_ch(struct drbd_conf
*mdev
, union drbd_state os
,
51 union drbd_state ns
, enum chg_state_flags flags
);
52 static enum drbd_state_rv
is_valid_state(struct drbd_conf
*, union drbd_state
);
53 static enum drbd_state_rv
is_valid_soft_transition(union drbd_state
, union drbd_state
, struct drbd_tconn
*);
54 static enum drbd_state_rv
is_valid_transition(union drbd_state os
, union drbd_state ns
);
55 static union drbd_state
sanitize_state(struct drbd_conf
*mdev
, union drbd_state ns
,
56 enum sanitize_state_warnings
*warn
);
58 static inline bool is_susp(union drbd_state s
)
60 return s
.susp
|| s
.susp_nod
|| s
.susp_fen
;
63 bool conn_all_vols_unconf(struct drbd_tconn
*tconn
)
65 struct drbd_conf
*mdev
;
70 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
) {
71 if (mdev
->state
.disk
!= D_DISKLESS
||
72 mdev
->state
.conn
!= C_STANDALONE
||
73 mdev
->state
.role
!= R_SECONDARY
) {
83 /* Unfortunately the states where not correctly ordered, when
84 they where defined. therefore can not use max_t() here. */
85 static enum drbd_role
max_role(enum drbd_role role1
, enum drbd_role role2
)
87 if (role1
== R_PRIMARY
|| role2
== R_PRIMARY
)
89 if (role1
== R_SECONDARY
|| role2
== R_SECONDARY
)
93 static enum drbd_role
min_role(enum drbd_role role1
, enum drbd_role role2
)
95 if (role1
== R_UNKNOWN
|| role2
== R_UNKNOWN
)
97 if (role1
== R_SECONDARY
|| role2
== R_SECONDARY
)
102 enum drbd_role
conn_highest_role(struct drbd_tconn
*tconn
)
104 enum drbd_role role
= R_UNKNOWN
;
105 struct drbd_conf
*mdev
;
109 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
110 role
= max_role(role
, mdev
->state
.role
);
116 enum drbd_role
conn_highest_peer(struct drbd_tconn
*tconn
)
118 enum drbd_role peer
= R_UNKNOWN
;
119 struct drbd_conf
*mdev
;
123 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
124 peer
= max_role(peer
, mdev
->state
.peer
);
130 enum drbd_disk_state
conn_highest_disk(struct drbd_tconn
*tconn
)
132 enum drbd_disk_state ds
= D_DISKLESS
;
133 struct drbd_conf
*mdev
;
137 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
138 ds
= max_t(enum drbd_disk_state
, ds
, mdev
->state
.disk
);
144 enum drbd_disk_state
conn_lowest_disk(struct drbd_tconn
*tconn
)
146 enum drbd_disk_state ds
= D_MASK
;
147 struct drbd_conf
*mdev
;
151 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
152 ds
= min_t(enum drbd_disk_state
, ds
, mdev
->state
.disk
);
158 enum drbd_disk_state
conn_highest_pdsk(struct drbd_tconn
*tconn
)
160 enum drbd_disk_state ds
= D_DISKLESS
;
161 struct drbd_conf
*mdev
;
165 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
166 ds
= max_t(enum drbd_disk_state
, ds
, mdev
->state
.pdsk
);
172 enum drbd_conns
conn_lowest_conn(struct drbd_tconn
*tconn
)
174 enum drbd_conns conn
= C_MASK
;
175 struct drbd_conf
*mdev
;
179 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
180 conn
= min_t(enum drbd_conns
, conn
, mdev
->state
.conn
);
186 static bool no_peer_wf_report_params(struct drbd_tconn
*tconn
)
188 struct drbd_conf
*mdev
;
193 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
)
194 if (mdev
->state
.conn
== C_WF_REPORT_PARAMS
) {
205 * cl_wide_st_chg() - true if the state change is a cluster wide one
206 * @mdev: DRBD device.
207 * @os: old (current) state.
208 * @ns: new (wanted) state.
210 static int cl_wide_st_chg(struct drbd_conf
*mdev
,
211 union drbd_state os
, union drbd_state ns
)
213 return (os
.conn
>= C_CONNECTED
&& ns
.conn
>= C_CONNECTED
&&
214 ((os
.role
!= R_PRIMARY
&& ns
.role
== R_PRIMARY
) ||
215 (os
.conn
!= C_STARTING_SYNC_T
&& ns
.conn
== C_STARTING_SYNC_T
) ||
216 (os
.conn
!= C_STARTING_SYNC_S
&& ns
.conn
== C_STARTING_SYNC_S
) ||
217 (os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
))) ||
218 (os
.conn
>= C_CONNECTED
&& ns
.conn
== C_DISCONNECTING
) ||
219 (os
.conn
== C_CONNECTED
&& ns
.conn
== C_VERIFY_S
) ||
220 (os
.conn
== C_CONNECTED
&& ns
.conn
== C_WF_REPORT_PARAMS
);
223 static union drbd_state
224 apply_mask_val(union drbd_state os
, union drbd_state mask
, union drbd_state val
)
227 ns
.i
= (os
.i
& ~mask
.i
) | val
.i
;
232 drbd_change_state(struct drbd_conf
*mdev
, enum chg_state_flags f
,
233 union drbd_state mask
, union drbd_state val
)
237 enum drbd_state_rv rv
;
239 spin_lock_irqsave(&mdev
->tconn
->req_lock
, flags
);
240 ns
= apply_mask_val(drbd_read_state(mdev
), mask
, val
);
241 rv
= _drbd_set_state(mdev
, ns
, f
, NULL
);
242 spin_unlock_irqrestore(&mdev
->tconn
->req_lock
, flags
);
248 * drbd_force_state() - Impose a change which happens outside our control on our state
249 * @mdev: DRBD device.
250 * @mask: mask of state bits to change.
251 * @val: value of new state bits.
253 void drbd_force_state(struct drbd_conf
*mdev
,
254 union drbd_state mask
, union drbd_state val
)
256 drbd_change_state(mdev
, CS_HARD
, mask
, val
);
259 static enum drbd_state_rv
260 _req_st_cond(struct drbd_conf
*mdev
, union drbd_state mask
,
261 union drbd_state val
)
263 union drbd_state os
, ns
;
265 enum drbd_state_rv rv
;
267 if (test_and_clear_bit(CL_ST_CHG_SUCCESS
, &mdev
->flags
))
268 return SS_CW_SUCCESS
;
270 if (test_and_clear_bit(CL_ST_CHG_FAIL
, &mdev
->flags
))
271 return SS_CW_FAILED_BY_PEER
;
273 spin_lock_irqsave(&mdev
->tconn
->req_lock
, flags
);
274 os
= drbd_read_state(mdev
);
275 ns
= sanitize_state(mdev
, apply_mask_val(os
, mask
, val
), NULL
);
276 rv
= is_valid_transition(os
, ns
);
277 if (rv
>= SS_SUCCESS
)
278 rv
= SS_UNKNOWN_ERROR
; /* cont waiting, otherwise fail. */
280 if (!cl_wide_st_chg(mdev
, os
, ns
))
282 if (rv
== SS_UNKNOWN_ERROR
) {
283 rv
= is_valid_state(mdev
, ns
);
284 if (rv
>= SS_SUCCESS
) {
285 rv
= is_valid_soft_transition(os
, ns
, mdev
->tconn
);
286 if (rv
>= SS_SUCCESS
)
287 rv
= SS_UNKNOWN_ERROR
; /* cont waiting, otherwise fail. */
290 spin_unlock_irqrestore(&mdev
->tconn
->req_lock
, flags
);
296 * drbd_req_state() - Perform an eventually cluster wide state change
297 * @mdev: DRBD device.
298 * @mask: mask of state bits to change.
299 * @val: value of new state bits.
302 * Should not be called directly, use drbd_request_state() or
303 * _drbd_request_state().
305 static enum drbd_state_rv
306 drbd_req_state(struct drbd_conf
*mdev
, union drbd_state mask
,
307 union drbd_state val
, enum chg_state_flags f
)
309 struct completion done
;
311 union drbd_state os
, ns
;
312 enum drbd_state_rv rv
;
314 init_completion(&done
);
316 if (f
& CS_SERIALIZE
)
317 mutex_lock(mdev
->state_mutex
);
319 spin_lock_irqsave(&mdev
->tconn
->req_lock
, flags
);
320 os
= drbd_read_state(mdev
);
321 ns
= sanitize_state(mdev
, apply_mask_val(os
, mask
, val
), NULL
);
322 rv
= is_valid_transition(os
, ns
);
323 if (rv
< SS_SUCCESS
) {
324 spin_unlock_irqrestore(&mdev
->tconn
->req_lock
, flags
);
328 if (cl_wide_st_chg(mdev
, os
, ns
)) {
329 rv
= is_valid_state(mdev
, ns
);
330 if (rv
== SS_SUCCESS
)
331 rv
= is_valid_soft_transition(os
, ns
, mdev
->tconn
);
332 spin_unlock_irqrestore(&mdev
->tconn
->req_lock
, flags
);
334 if (rv
< SS_SUCCESS
) {
336 print_st_err(mdev
, os
, ns
, rv
);
340 if (drbd_send_state_req(mdev
, mask
, val
)) {
341 rv
= SS_CW_FAILED_BY_PEER
;
343 print_st_err(mdev
, os
, ns
, rv
);
347 wait_event(mdev
->state_wait
,
348 (rv
= _req_st_cond(mdev
, mask
, val
)));
350 if (rv
< SS_SUCCESS
) {
352 print_st_err(mdev
, os
, ns
, rv
);
355 spin_lock_irqsave(&mdev
->tconn
->req_lock
, flags
);
356 ns
= apply_mask_val(drbd_read_state(mdev
), mask
, val
);
357 rv
= _drbd_set_state(mdev
, ns
, f
, &done
);
359 rv
= _drbd_set_state(mdev
, ns
, f
, &done
);
362 spin_unlock_irqrestore(&mdev
->tconn
->req_lock
, flags
);
364 if (f
& CS_WAIT_COMPLETE
&& rv
== SS_SUCCESS
) {
365 D_ASSERT(current
!= mdev
->tconn
->worker
.task
);
366 wait_for_completion(&done
);
370 if (f
& CS_SERIALIZE
)
371 mutex_unlock(mdev
->state_mutex
);
377 * _drbd_request_state() - Request a state change (with flags)
378 * @mdev: DRBD device.
379 * @mask: mask of state bits to change.
380 * @val: value of new state bits.
383 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
384 * flag, or when logging of failed state change requests is not desired.
387 _drbd_request_state(struct drbd_conf
*mdev
, union drbd_state mask
,
388 union drbd_state val
, enum chg_state_flags f
)
390 enum drbd_state_rv rv
;
392 wait_event(mdev
->state_wait
,
393 (rv
= drbd_req_state(mdev
, mask
, val
, f
)) != SS_IN_TRANSIENT_STATE
);
398 static void print_st(struct drbd_conf
*mdev
, char *name
, union drbd_state ns
)
400 dev_err(DEV
, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
402 drbd_conn_str(ns
.conn
),
403 drbd_role_str(ns
.role
),
404 drbd_role_str(ns
.peer
),
405 drbd_disk_str(ns
.disk
),
406 drbd_disk_str(ns
.pdsk
),
407 is_susp(ns
) ? 's' : 'r',
408 ns
.aftr_isp
? 'a' : '-',
409 ns
.peer_isp
? 'p' : '-',
410 ns
.user_isp
? 'u' : '-',
411 ns
.susp_fen
? 'F' : '-',
412 ns
.susp_nod
? 'N' : '-'
416 void print_st_err(struct drbd_conf
*mdev
, union drbd_state os
,
417 union drbd_state ns
, enum drbd_state_rv err
)
419 if (err
== SS_IN_TRANSIENT_STATE
)
421 dev_err(DEV
, "State change failed: %s\n", drbd_set_st_err_str(err
));
422 print_st(mdev
, " state", os
);
423 print_st(mdev
, "wanted", ns
);
426 static long print_state_change(char *pb
, union drbd_state os
, union drbd_state ns
,
427 enum chg_state_flags flags
)
433 if (ns
.role
!= os
.role
&& flags
& CS_DC_ROLE
)
434 pbp
+= sprintf(pbp
, "role( %s -> %s ) ",
435 drbd_role_str(os
.role
),
436 drbd_role_str(ns
.role
));
437 if (ns
.peer
!= os
.peer
&& flags
& CS_DC_PEER
)
438 pbp
+= sprintf(pbp
, "peer( %s -> %s ) ",
439 drbd_role_str(os
.peer
),
440 drbd_role_str(ns
.peer
));
441 if (ns
.conn
!= os
.conn
&& flags
& CS_DC_CONN
)
442 pbp
+= sprintf(pbp
, "conn( %s -> %s ) ",
443 drbd_conn_str(os
.conn
),
444 drbd_conn_str(ns
.conn
));
445 if (ns
.disk
!= os
.disk
&& flags
& CS_DC_DISK
)
446 pbp
+= sprintf(pbp
, "disk( %s -> %s ) ",
447 drbd_disk_str(os
.disk
),
448 drbd_disk_str(ns
.disk
));
449 if (ns
.pdsk
!= os
.pdsk
&& flags
& CS_DC_PDSK
)
450 pbp
+= sprintf(pbp
, "pdsk( %s -> %s ) ",
451 drbd_disk_str(os
.pdsk
),
452 drbd_disk_str(ns
.pdsk
));
457 static void drbd_pr_state_change(struct drbd_conf
*mdev
, union drbd_state os
, union drbd_state ns
,
458 enum chg_state_flags flags
)
463 pbp
+= print_state_change(pbp
, os
, ns
, flags
^ CS_DC_MASK
);
465 if (ns
.aftr_isp
!= os
.aftr_isp
)
466 pbp
+= sprintf(pbp
, "aftr_isp( %d -> %d ) ",
469 if (ns
.peer_isp
!= os
.peer_isp
)
470 pbp
+= sprintf(pbp
, "peer_isp( %d -> %d ) ",
473 if (ns
.user_isp
!= os
.user_isp
)
474 pbp
+= sprintf(pbp
, "user_isp( %d -> %d ) ",
479 dev_info(DEV
, "%s\n", pb
);
482 static void conn_pr_state_change(struct drbd_tconn
*tconn
, union drbd_state os
, union drbd_state ns
,
483 enum chg_state_flags flags
)
488 pbp
+= print_state_change(pbp
, os
, ns
, flags
);
490 if (is_susp(ns
) != is_susp(os
) && flags
& CS_DC_SUSP
)
491 pbp
+= sprintf(pbp
, "susp( %d -> %d ) ",
496 conn_info(tconn
, "%s\n", pb
);
501 * is_valid_state() - Returns an SS_ error code if ns is not valid
502 * @mdev: DRBD device.
503 * @ns: State to consider.
505 static enum drbd_state_rv
506 is_valid_state(struct drbd_conf
*mdev
, union drbd_state ns
)
508 /* See drbd_state_sw_errors in drbd_strings.c */
510 enum drbd_fencing_p fp
;
511 enum drbd_state_rv rv
= SS_SUCCESS
;
516 if (get_ldev(mdev
)) {
517 fp
= rcu_dereference(mdev
->ldev
->disk_conf
)->fencing
;
521 nc
= rcu_dereference(mdev
->tconn
->net_conf
);
523 if (!nc
->two_primaries
&& ns
.role
== R_PRIMARY
) {
524 if (ns
.peer
== R_PRIMARY
)
525 rv
= SS_TWO_PRIMARIES
;
526 else if (conn_highest_peer(mdev
->tconn
) == R_PRIMARY
)
527 rv
= SS_O_VOL_PEER_PRI
;
532 /* already found a reason to abort */;
533 else if (ns
.role
== R_SECONDARY
&& mdev
->open_cnt
)
534 rv
= SS_DEVICE_IN_USE
;
536 else if (ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.disk
< D_UP_TO_DATE
)
537 rv
= SS_NO_UP_TO_DATE_DISK
;
539 else if (fp
>= FP_RESOURCE
&&
540 ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.pdsk
>= D_UNKNOWN
)
543 else if (ns
.role
== R_PRIMARY
&& ns
.disk
<= D_INCONSISTENT
&& ns
.pdsk
<= D_INCONSISTENT
)
544 rv
= SS_NO_UP_TO_DATE_DISK
;
546 else if (ns
.conn
> C_CONNECTED
&& ns
.disk
< D_INCONSISTENT
)
547 rv
= SS_NO_LOCAL_DISK
;
549 else if (ns
.conn
> C_CONNECTED
&& ns
.pdsk
< D_INCONSISTENT
)
550 rv
= SS_NO_REMOTE_DISK
;
552 else if (ns
.conn
> C_CONNECTED
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
)
553 rv
= SS_NO_UP_TO_DATE_DISK
;
555 else if ((ns
.conn
== C_CONNECTED
||
556 ns
.conn
== C_WF_BITMAP_S
||
557 ns
.conn
== C_SYNC_SOURCE
||
558 ns
.conn
== C_PAUSED_SYNC_S
) &&
559 ns
.disk
== D_OUTDATED
)
560 rv
= SS_CONNECTED_OUTDATES
;
562 else if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
563 (nc
->verify_alg
[0] == 0))
564 rv
= SS_NO_VERIFY_ALG
;
566 else if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
567 mdev
->tconn
->agreed_pro_version
< 88)
568 rv
= SS_NOT_SUPPORTED
;
570 else if (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
)
571 rv
= SS_NO_UP_TO_DATE_DISK
;
573 else if ((ns
.conn
== C_STARTING_SYNC_S
|| ns
.conn
== C_STARTING_SYNC_T
) &&
574 ns
.pdsk
== D_UNKNOWN
)
575 rv
= SS_NEED_CONNECTION
;
577 else if (ns
.conn
>= C_CONNECTED
&& ns
.pdsk
== D_UNKNOWN
)
578 rv
= SS_CONNECTED_OUTDATES
;
586 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
587 * This function limits state transitions that may be declined by DRBD. I.e.
588 * user requests (aka soft transitions).
589 * @mdev: DRBD device.
593 static enum drbd_state_rv
594 is_valid_soft_transition(union drbd_state os
, union drbd_state ns
, struct drbd_tconn
*tconn
)
596 enum drbd_state_rv rv
= SS_SUCCESS
;
598 if ((ns
.conn
== C_STARTING_SYNC_T
|| ns
.conn
== C_STARTING_SYNC_S
) &&
599 os
.conn
> C_CONNECTED
)
600 rv
= SS_RESYNC_RUNNING
;
602 if (ns
.conn
== C_DISCONNECTING
&& os
.conn
== C_STANDALONE
)
603 rv
= SS_ALREADY_STANDALONE
;
605 if (ns
.disk
> D_ATTACHING
&& os
.disk
== D_DISKLESS
)
608 if (ns
.conn
== C_WF_CONNECTION
&& os
.conn
< C_UNCONNECTED
)
609 rv
= SS_NO_NET_CONFIG
;
611 if (ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
&& os
.disk
!= D_ATTACHING
)
612 rv
= SS_LOWER_THAN_OUTDATED
;
614 if (ns
.conn
== C_DISCONNECTING
&& os
.conn
== C_UNCONNECTED
)
615 rv
= SS_IN_TRANSIENT_STATE
;
617 /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
618 rv = SS_IN_TRANSIENT_STATE; */
620 /* While establishing a connection only allow cstate to change.
621 Delay/refuse role changes, detach attach etc... */
622 if (test_bit(STATE_SENT
, &tconn
->flags
) &&
623 !(os
.conn
== C_WF_REPORT_PARAMS
||
624 (ns
.conn
== C_WF_REPORT_PARAMS
&& os
.conn
== C_WF_CONNECTION
)))
625 rv
= SS_IN_TRANSIENT_STATE
;
627 if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) && os
.conn
< C_CONNECTED
)
628 rv
= SS_NEED_CONNECTION
;
630 if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
631 ns
.conn
!= os
.conn
&& os
.conn
> C_CONNECTED
)
632 rv
= SS_RESYNC_RUNNING
;
634 if ((ns
.conn
== C_STARTING_SYNC_S
|| ns
.conn
== C_STARTING_SYNC_T
) &&
635 os
.conn
< C_CONNECTED
)
636 rv
= SS_NEED_CONNECTION
;
638 if ((ns
.conn
== C_SYNC_TARGET
|| ns
.conn
== C_SYNC_SOURCE
)
639 && os
.conn
< C_WF_REPORT_PARAMS
)
640 rv
= SS_NEED_CONNECTION
; /* No NetworkFailure -> SyncTarget etc... */
642 if (ns
.conn
== C_DISCONNECTING
&& ns
.pdsk
== D_OUTDATED
&&
643 os
.conn
< C_CONNECTED
&& os
.pdsk
> D_OUTDATED
)
644 rv
= SS_OUTDATE_WO_CONN
;
649 static enum drbd_state_rv
650 is_valid_conn_transition(enum drbd_conns oc
, enum drbd_conns nc
)
652 /* no change -> nothing to do, at least for the connection part */
654 return SS_NOTHING_TO_DO
;
656 /* disconnect of an unconfigured connection does not make sense */
657 if (oc
== C_STANDALONE
&& nc
== C_DISCONNECTING
)
658 return SS_ALREADY_STANDALONE
;
660 /* from C_STANDALONE, we start with C_UNCONNECTED */
661 if (oc
== C_STANDALONE
&& nc
!= C_UNCONNECTED
)
662 return SS_NEED_CONNECTION
;
664 /* When establishing a connection we need to go through WF_REPORT_PARAMS!
665 Necessary to do the right thing upon invalidate-remote on a disconnected resource */
666 if (oc
< C_WF_REPORT_PARAMS
&& nc
>= C_CONNECTED
)
667 return SS_NEED_CONNECTION
;
669 /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
670 if (oc
>= C_TIMEOUT
&& oc
<= C_TEAR_DOWN
&& nc
!= C_UNCONNECTED
&& nc
!= C_DISCONNECTING
)
671 return SS_IN_TRANSIENT_STATE
;
673 /* After C_DISCONNECTING only C_STANDALONE may follow */
674 if (oc
== C_DISCONNECTING
&& nc
!= C_STANDALONE
)
675 return SS_IN_TRANSIENT_STATE
;
682 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
683 * This limits hard state transitions. Hard state transitions are facts there are
684 * imposed on DRBD by the environment. E.g. disk broke or network broke down.
685 * But those hard state transitions are still not allowed to do everything.
689 static enum drbd_state_rv
690 is_valid_transition(union drbd_state os
, union drbd_state ns
)
692 enum drbd_state_rv rv
;
694 rv
= is_valid_conn_transition(os
.conn
, ns
.conn
);
696 /* we cannot fail (again) if we already detached */
697 if (ns
.disk
== D_FAILED
&& os
.disk
== D_DISKLESS
)
703 static void print_sanitize_warnings(struct drbd_conf
*mdev
, enum sanitize_state_warnings warn
)
705 static const char *msg_table
[] = {
707 [ABORTED_ONLINE_VERIFY
] = "Online-verify aborted.",
708 [ABORTED_RESYNC
] = "Resync aborted.",
709 [CONNECTION_LOST_NEGOTIATING
] = "Connection lost while negotiating, no data!",
710 [IMPLICITLY_UPGRADED_DISK
] = "Implicitly upgraded disk",
711 [IMPLICITLY_UPGRADED_PDSK
] = "Implicitly upgraded pdsk",
714 if (warn
!= NO_WARNING
)
715 dev_warn(DEV
, "%s\n", msg_table
[warn
]);
719 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
720 * @mdev: DRBD device.
725 * When we loose connection, we have to set the state of the peers disk (pdsk)
726 * to D_UNKNOWN. This rule and many more along those lines are in this function.
728 static union drbd_state
sanitize_state(struct drbd_conf
*mdev
, union drbd_state ns
,
729 enum sanitize_state_warnings
*warn
)
731 enum drbd_fencing_p fp
;
732 enum drbd_disk_state disk_min
, disk_max
, pdsk_min
, pdsk_max
;
738 if (get_ldev(mdev
)) {
740 fp
= rcu_dereference(mdev
->ldev
->disk_conf
)->fencing
;
745 /* Implications from connection to peer and peer_isp */
746 if (ns
.conn
< C_CONNECTED
) {
749 if (ns
.pdsk
> D_UNKNOWN
|| ns
.pdsk
< D_INCONSISTENT
)
753 /* Clear the aftr_isp when becoming unconfigured */
754 if (ns
.conn
== C_STANDALONE
&& ns
.disk
== D_DISKLESS
&& ns
.role
== R_SECONDARY
)
757 /* An implication of the disk states onto the connection state */
758 /* Abort resync if a disk fails/detaches */
759 if (ns
.conn
> C_CONNECTED
&& (ns
.disk
<= D_FAILED
|| ns
.pdsk
<= D_FAILED
)) {
761 *warn
= ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
?
762 ABORTED_ONLINE_VERIFY
: ABORTED_RESYNC
;
763 ns
.conn
= C_CONNECTED
;
766 /* Connection breaks down before we finished "Negotiating" */
767 if (ns
.conn
< C_CONNECTED
&& ns
.disk
== D_NEGOTIATING
&&
768 get_ldev_if_state(mdev
, D_NEGOTIATING
)) {
769 if (mdev
->ed_uuid
== mdev
->ldev
->md
.uuid
[UI_CURRENT
]) {
770 ns
.disk
= mdev
->new_state_tmp
.disk
;
771 ns
.pdsk
= mdev
->new_state_tmp
.pdsk
;
774 *warn
= CONNECTION_LOST_NEGOTIATING
;
775 ns
.disk
= D_DISKLESS
;
781 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
782 if (ns
.conn
>= C_CONNECTED
&& ns
.conn
< C_AHEAD
) {
783 if (ns
.disk
== D_CONSISTENT
|| ns
.disk
== D_OUTDATED
)
784 ns
.disk
= D_UP_TO_DATE
;
785 if (ns
.pdsk
== D_CONSISTENT
|| ns
.pdsk
== D_OUTDATED
)
786 ns
.pdsk
= D_UP_TO_DATE
;
789 /* Implications of the connection stat on the disk states */
790 disk_min
= D_DISKLESS
;
791 disk_max
= D_UP_TO_DATE
;
792 pdsk_min
= D_INCONSISTENT
;
793 pdsk_max
= D_UNKNOWN
;
794 switch ((enum drbd_conns
)ns
.conn
) {
796 case C_PAUSED_SYNC_T
:
797 case C_STARTING_SYNC_T
:
800 disk_min
= D_INCONSISTENT
;
801 disk_max
= D_OUTDATED
;
802 pdsk_min
= D_UP_TO_DATE
;
803 pdsk_max
= D_UP_TO_DATE
;
807 disk_min
= D_UP_TO_DATE
;
808 disk_max
= D_UP_TO_DATE
;
809 pdsk_min
= D_UP_TO_DATE
;
810 pdsk_max
= D_UP_TO_DATE
;
813 disk_min
= D_DISKLESS
;
814 disk_max
= D_UP_TO_DATE
;
815 pdsk_min
= D_DISKLESS
;
816 pdsk_max
= D_UP_TO_DATE
;
819 case C_PAUSED_SYNC_S
:
820 case C_STARTING_SYNC_S
:
822 disk_min
= D_UP_TO_DATE
;
823 disk_max
= D_UP_TO_DATE
;
824 pdsk_min
= D_INCONSISTENT
;
825 pdsk_max
= D_CONSISTENT
; /* D_OUTDATED would be nice. But explicit outdate necessary*/
828 disk_min
= D_INCONSISTENT
;
829 disk_max
= D_INCONSISTENT
;
830 pdsk_min
= D_UP_TO_DATE
;
831 pdsk_max
= D_UP_TO_DATE
;
834 disk_min
= D_UP_TO_DATE
;
835 disk_max
= D_UP_TO_DATE
;
836 pdsk_min
= D_INCONSISTENT
;
837 pdsk_max
= D_INCONSISTENT
;
840 case C_DISCONNECTING
:
844 case C_NETWORK_FAILURE
:
845 case C_PROTOCOL_ERROR
:
847 case C_WF_CONNECTION
:
848 case C_WF_REPORT_PARAMS
:
852 if (ns
.disk
> disk_max
)
855 if (ns
.disk
< disk_min
) {
857 *warn
= IMPLICITLY_UPGRADED_DISK
;
860 if (ns
.pdsk
> pdsk_max
)
863 if (ns
.pdsk
< pdsk_min
) {
865 *warn
= IMPLICITLY_UPGRADED_PDSK
;
869 if (fp
== FP_STONITH
&&
870 (ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.pdsk
> D_OUTDATED
))
871 ns
.susp_fen
= 1; /* Suspend IO while fence-peer handler runs (peer lost) */
873 if (mdev
->tconn
->res_opts
.on_no_data
== OND_SUSPEND_IO
&&
874 (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
))
875 ns
.susp_nod
= 1; /* Suspend IO while no data available (no accessible data available) */
877 if (ns
.aftr_isp
|| ns
.peer_isp
|| ns
.user_isp
) {
878 if (ns
.conn
== C_SYNC_SOURCE
)
879 ns
.conn
= C_PAUSED_SYNC_S
;
880 if (ns
.conn
== C_SYNC_TARGET
)
881 ns
.conn
= C_PAUSED_SYNC_T
;
883 if (ns
.conn
== C_PAUSED_SYNC_S
)
884 ns
.conn
= C_SYNC_SOURCE
;
885 if (ns
.conn
== C_PAUSED_SYNC_T
)
886 ns
.conn
= C_SYNC_TARGET
;
892 void drbd_resume_al(struct drbd_conf
*mdev
)
894 if (test_and_clear_bit(AL_SUSPENDED
, &mdev
->flags
))
895 dev_info(DEV
, "Resumed AL updates\n");
898 /* helper for __drbd_set_state */
899 static void set_ov_position(struct drbd_conf
*mdev
, enum drbd_conns cs
)
901 if (mdev
->tconn
->agreed_pro_version
< 90)
902 mdev
->ov_start_sector
= 0;
903 mdev
->rs_total
= drbd_bm_bits(mdev
);
904 mdev
->ov_position
= 0;
905 if (cs
== C_VERIFY_T
) {
906 /* starting online verify from an arbitrary position
907 * does not fit well into the existing protocol.
908 * on C_VERIFY_T, we initialize ov_left and friends
909 * implicitly in receive_DataRequest once the
910 * first P_OV_REQUEST is received */
911 mdev
->ov_start_sector
= ~(sector_t
)0;
913 unsigned long bit
= BM_SECT_TO_BIT(mdev
->ov_start_sector
);
914 if (bit
>= mdev
->rs_total
) {
915 mdev
->ov_start_sector
=
916 BM_BIT_TO_SECT(mdev
->rs_total
- 1);
919 mdev
->rs_total
-= bit
;
920 mdev
->ov_position
= mdev
->ov_start_sector
;
922 mdev
->ov_left
= mdev
->rs_total
;
926 * __drbd_set_state() - Set a new DRBD state
927 * @mdev: DRBD device.
930 * @done: Optional completion, that will get completed after the after_state_ch() finished
932 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
935 __drbd_set_state(struct drbd_conf
*mdev
, union drbd_state ns
,
936 enum chg_state_flags flags
, struct completion
*done
)
939 enum drbd_state_rv rv
= SS_SUCCESS
;
940 enum sanitize_state_warnings ssw
;
941 struct after_state_chg_work
*ascw
;
942 bool did_remote
, should_do_remote
;
944 os
= drbd_read_state(mdev
);
946 ns
= sanitize_state(mdev
, ns
, &ssw
);
948 return SS_NOTHING_TO_DO
;
950 rv
= is_valid_transition(os
, ns
);
954 if (!(flags
& CS_HARD
)) {
955 /* pre-state-change checks ; only look at ns */
956 /* See drbd_state_sw_errors in drbd_strings.c */
958 rv
= is_valid_state(mdev
, ns
);
959 if (rv
< SS_SUCCESS
) {
960 /* If the old state was illegal as well, then let
963 if (is_valid_state(mdev
, os
) == rv
)
964 rv
= is_valid_soft_transition(os
, ns
, mdev
->tconn
);
966 rv
= is_valid_soft_transition(os
, ns
, mdev
->tconn
);
969 if (rv
< SS_SUCCESS
) {
970 if (flags
& CS_VERBOSE
)
971 print_st_err(mdev
, os
, ns
, rv
);
975 print_sanitize_warnings(mdev
, ssw
);
977 drbd_pr_state_change(mdev
, os
, ns
, flags
);
979 /* Display changes to the susp* flags that where caused by the call to
980 sanitize_state(). Only display it here if we where not called from
981 _conn_request_state() */
982 if (!(flags
& CS_DC_SUSP
))
983 conn_pr_state_change(mdev
->tconn
, os
, ns
, (flags
& ~CS_DC_MASK
) | CS_DC_SUSP
);
985 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
986 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
987 * drbd_ldev_destroy() won't happen before our corresponding
988 * after_state_ch works run, where we put_ldev again. */
989 if ((os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
) ||
990 (os
.disk
!= D_DISKLESS
&& ns
.disk
== D_DISKLESS
))
991 atomic_inc(&mdev
->local_cnt
);
993 did_remote
= drbd_should_do_remote(mdev
->state
);
994 mdev
->state
.i
= ns
.i
;
995 should_do_remote
= drbd_should_do_remote(mdev
->state
);
996 mdev
->tconn
->susp
= ns
.susp
;
997 mdev
->tconn
->susp_nod
= ns
.susp_nod
;
998 mdev
->tconn
->susp_fen
= ns
.susp_fen
;
1000 /* put replicated vs not-replicated requests in seperate epochs */
1001 if (did_remote
!= should_do_remote
)
1002 start_new_tl_epoch(mdev
->tconn
);
1004 if (os
.disk
== D_ATTACHING
&& ns
.disk
>= D_NEGOTIATING
)
1005 drbd_print_uuids(mdev
, "attached to UUIDs");
1007 /* Wake up role changes, that were delayed because of connection establishing */
1008 if (os
.conn
== C_WF_REPORT_PARAMS
&& ns
.conn
!= C_WF_REPORT_PARAMS
&&
1009 no_peer_wf_report_params(mdev
->tconn
))
1010 clear_bit(STATE_SENT
, &mdev
->tconn
->flags
);
1012 wake_up(&mdev
->misc_wait
);
1013 wake_up(&mdev
->state_wait
);
1014 wake_up(&mdev
->tconn
->ping_wait
);
1016 /* Aborted verify run, or we reached the stop sector.
1017 * Log the last position, unless end-of-device. */
1018 if ((os
.conn
== C_VERIFY_S
|| os
.conn
== C_VERIFY_T
) &&
1019 ns
.conn
<= C_CONNECTED
) {
1020 mdev
->ov_start_sector
=
1021 BM_BIT_TO_SECT(drbd_bm_bits(mdev
) - mdev
->ov_left
);
1023 dev_info(DEV
, "Online Verify reached sector %llu\n",
1024 (unsigned long long)mdev
->ov_start_sector
);
1027 if ((os
.conn
== C_PAUSED_SYNC_T
|| os
.conn
== C_PAUSED_SYNC_S
) &&
1028 (ns
.conn
== C_SYNC_TARGET
|| ns
.conn
== C_SYNC_SOURCE
)) {
1029 dev_info(DEV
, "Syncer continues.\n");
1030 mdev
->rs_paused
+= (long)jiffies
1031 -(long)mdev
->rs_mark_time
[mdev
->rs_last_mark
];
1032 if (ns
.conn
== C_SYNC_TARGET
)
1033 mod_timer(&mdev
->resync_timer
, jiffies
);
1036 if ((os
.conn
== C_SYNC_TARGET
|| os
.conn
== C_SYNC_SOURCE
) &&
1037 (ns
.conn
== C_PAUSED_SYNC_T
|| ns
.conn
== C_PAUSED_SYNC_S
)) {
1038 dev_info(DEV
, "Resync suspended\n");
1039 mdev
->rs_mark_time
[mdev
->rs_last_mark
] = jiffies
;
1042 if (os
.conn
== C_CONNECTED
&&
1043 (ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
)) {
1044 unsigned long now
= jiffies
;
1047 set_ov_position(mdev
, ns
.conn
);
1048 mdev
->rs_start
= now
;
1049 mdev
->rs_last_events
= 0;
1050 mdev
->rs_last_sect_ev
= 0;
1051 mdev
->ov_last_oos_size
= 0;
1052 mdev
->ov_last_oos_start
= 0;
1054 for (i
= 0; i
< DRBD_SYNC_MARKS
; i
++) {
1055 mdev
->rs_mark_left
[i
] = mdev
->ov_left
;
1056 mdev
->rs_mark_time
[i
] = now
;
1059 drbd_rs_controller_reset(mdev
);
1061 if (ns
.conn
== C_VERIFY_S
) {
1062 dev_info(DEV
, "Starting Online Verify from sector %llu\n",
1063 (unsigned long long)mdev
->ov_position
);
1064 mod_timer(&mdev
->resync_timer
, jiffies
);
1068 if (get_ldev(mdev
)) {
1069 u32 mdf
= mdev
->ldev
->md
.flags
& ~(MDF_CONSISTENT
|MDF_PRIMARY_IND
|
1070 MDF_CONNECTED_IND
|MDF_WAS_UP_TO_DATE
|
1071 MDF_PEER_OUT_DATED
|MDF_CRASHED_PRIMARY
);
1073 mdf
&= ~MDF_AL_CLEAN
;
1074 if (test_bit(CRASHED_PRIMARY
, &mdev
->flags
))
1075 mdf
|= MDF_CRASHED_PRIMARY
;
1076 if (mdev
->state
.role
== R_PRIMARY
||
1077 (mdev
->state
.pdsk
< D_INCONSISTENT
&& mdev
->state
.peer
== R_PRIMARY
))
1078 mdf
|= MDF_PRIMARY_IND
;
1079 if (mdev
->state
.conn
> C_WF_REPORT_PARAMS
)
1080 mdf
|= MDF_CONNECTED_IND
;
1081 if (mdev
->state
.disk
> D_INCONSISTENT
)
1082 mdf
|= MDF_CONSISTENT
;
1083 if (mdev
->state
.disk
> D_OUTDATED
)
1084 mdf
|= MDF_WAS_UP_TO_DATE
;
1085 if (mdev
->state
.pdsk
<= D_OUTDATED
&& mdev
->state
.pdsk
>= D_INCONSISTENT
)
1086 mdf
|= MDF_PEER_OUT_DATED
;
1087 if (mdf
!= mdev
->ldev
->md
.flags
) {
1088 mdev
->ldev
->md
.flags
= mdf
;
1089 drbd_md_mark_dirty(mdev
);
1091 if (os
.disk
< D_CONSISTENT
&& ns
.disk
>= D_CONSISTENT
)
1092 drbd_set_ed_uuid(mdev
, mdev
->ldev
->md
.uuid
[UI_CURRENT
]);
1096 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1097 if (os
.disk
== D_INCONSISTENT
&& os
.pdsk
== D_INCONSISTENT
&&
1098 os
.peer
== R_SECONDARY
&& ns
.peer
== R_PRIMARY
)
1099 set_bit(CONSIDER_RESYNC
, &mdev
->flags
);
1101 /* Receiver should clean up itself */
1102 if (os
.conn
!= C_DISCONNECTING
&& ns
.conn
== C_DISCONNECTING
)
1103 drbd_thread_stop_nowait(&mdev
->tconn
->receiver
);
1105 /* Now the receiver finished cleaning up itself, it should die */
1106 if (os
.conn
!= C_STANDALONE
&& ns
.conn
== C_STANDALONE
)
1107 drbd_thread_stop_nowait(&mdev
->tconn
->receiver
);
1109 /* Upon network failure, we need to restart the receiver. */
1110 if (os
.conn
> C_WF_CONNECTION
&&
1111 ns
.conn
<= C_TEAR_DOWN
&& ns
.conn
>= C_TIMEOUT
)
1112 drbd_thread_restart_nowait(&mdev
->tconn
->receiver
);
1114 /* Resume AL writing if we get a connection */
1115 if (os
.conn
< C_CONNECTED
&& ns
.conn
>= C_CONNECTED
) {
1116 drbd_resume_al(mdev
);
1117 mdev
->tconn
->connect_cnt
++;
1120 /* remember last attach time so request_timer_fn() won't
1121 * kill newly established sessions while we are still trying to thaw
1122 * previously frozen IO */
1123 if ((os
.disk
== D_ATTACHING
|| os
.disk
== D_NEGOTIATING
) &&
1124 ns
.disk
> D_NEGOTIATING
)
1125 mdev
->last_reattach_jif
= jiffies
;
1127 ascw
= kmalloc(sizeof(*ascw
), GFP_ATOMIC
);
1131 ascw
->flags
= flags
;
1132 ascw
->w
.cb
= w_after_state_ch
;
1133 ascw
->w
.mdev
= mdev
;
1135 drbd_queue_work(&mdev
->tconn
->sender_work
, &ascw
->w
);
1137 dev_err(DEV
, "Could not kmalloc an ascw\n");
/*
 * w_after_state_ch() - worker callback wrapping after_state_ch().
 *
 * Queued by __drbd_set_state() so that the (possibly sleeping)
 * after-state-change actions run in worker context, outside the
 * req_lock.  If the requester asked for synchronous completion
 * (CS_WAIT_COMPLETE), signal it once the actions are done.
 *
 * NOTE(review): this listing is incomplete -- the opening/closing
 * braces, the freeing of the work item and the final return are not
 * visible here; confirm against the full source.
 */
static int w_after_state_ch(struct drbd_work *w, int unused)
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_conf *mdev = w->mdev;

	/* Run the deferred, possibly sleeping, after-state actions. */
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	/* Wake a caller waiting for the state change to be fully processed. */
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
/*
 * abw_start_sync() - "after bitmap write" callback: kick off the resync
 * once the full-sync bitmap has been written to disk.
 *
 * @rv: result of the preceding bitmap I/O; non-zero means the bitmap
 *      write failed.
 *
 * NOTE(review): this listing elides the braces/guard around the error
 * path and the switch "break" statements -- presumably the dev_err /
 * fallback-to-C_CONNECTED pair sits under an "if (rv)" check; confirm
 * against the full source.
 */
static void abw_start_sync(struct drbd_conf *mdev, int rv)
	/* Bitmap write failed: fall back to plain Connected, no resync. */
	dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
	_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		/* Sync target: next step is waiting for the sync UUID. */
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
	case C_STARTING_SYNC_S:
		/* Sync source: start the resync directly. */
		drbd_start_resync(mdev, C_SYNC_SOURCE);
/*
 * drbd_bitmap_io_from_worker() - run a bitmap I/O routine from worker
 * context, with application I/O suspended and the bitmap locked.
 *
 * @io_fn: bitmap I/O routine to run while the bitmap lock is held.
 * @why:   human-readable reason, passed to drbd_bm_lock() for logging.
 * @flags: bm_flag lock flags (which kinds of bitmap changes are allowed
 *         while locked).
 *
 * Must be called by the worker thread itself (asserted below): I/O is
 * suspended non-blockingly because the worker must not wait on work it
 * would have to process itself.
 *
 * NOTE(review): the actual io_fn() invocation and the propagation of
 * its return value are elided in this listing; confirm against the
 * full source.
 */
int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags)
	D_ASSERT(current == mdev->tconn->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev: DRBD device.
 * @os:   old (previous) state.
 * @ns:   new state.
 * @flags: chg_state_flags of the state change.
 *
 * Runs in worker context (queued via w_after_state_ch), so it may
 * sleep.  Informs userspace, notifies the peer, triggers bitmap I/O
 * and resync start/stop as implied by the os -> ns transition.
 *
 * NOTE(review): this listing elides a number of lines (several
 * enclosing if()s, put_ldev()/RCU calls, else-branches and closing
 * braces).  Comments below describe only what is visible; confirm
 * structure against the full source.
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
	struct sib_info sib;

	sib.sib_reason = SIB_STATE_CHANGE;

	/* Connection (re)established: a previous primary crash no longer
	 * matters for this peer; clear the crashed-primary bookkeeping. */
	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		mdev->p_uuid[UI_FLAGS] &= ~((u64)2);

	/* Inform userspace about the change... */
	drbd_bcast_event(mdev, &sib);

	/* Just became primary on inconsistent, degraded storage:
	 * run the "pri-on-incon-degr" userspace helper. */
	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	/* Thaw requests that were frozen for lack of disk/connection
	 * (susp_nod).  NOTE(review): the enclosing "if (ns.susp_nod)"
	 * and the RESEND assignment appear to be elided here. */
		struct drbd_tconn *tconn = mdev->tconn;
		enum drbd_req_event what = NOTHING;

		spin_lock_irq(&tconn->req_lock);
		if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED)
		if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
		    conn_lowest_disk(tconn) > D_NEGOTIATING)
			what = RESTART_FROZEN_DISK_IO;

		if (tconn->susp_nod && what != NOTHING) {
			/* Restart the frozen requests, then clear susp_nod
			 * connection-wide. */
			_tl_restart(tconn, what);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_nod = 1 } },
					    (union drbd_state) { { .susp_nod = 0 } },
		spin_unlock_irq(&tconn->req_lock);

	/* Handle requests frozen by fencing (susp_fen).
	 * NOTE(review): the enclosing "if (ns.susp_fen)" and the RCU
	 * read lock around the idr walk appear to be elided here. */
		struct drbd_tconn *tconn = mdev->tconn;

		spin_lock_irq(&tconn->req_lock);
		if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
			/* case2: The connection was established again: */
			struct drbd_conf *odev;

			idr_for_each_entry(&tconn->volumes, odev, vnr)
				clear_bit(NEW_CUR_UUID, &odev->flags);
			_tl_restart(tconn, RESEND);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
		spin_unlock_irq(&tconn->req_lock);

	/* Became sync source. With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS &&
	    ns.pdsk > D_DISKLESS && ns.pdsk != D_UNKNOWN) {      /* attach on the peer */
		/* we probably will start a resync soon.
		 * make sure those things are properly reset. */
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);
		drbd_rs_cancel_all(mdev);

		drbd_send_uuids(mdev);
		drbd_send_state(mdev, ns);

	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			/* We are (or our peer is) primary and have no bitmap
			 * UUID yet: a new current UUID generation is due.
			 * NOTE(review): the else-branch separating the
			 * suspended case (defer via NEW_CUR_UUID) from the
			 * immediate uuid-new/send path is elided here. */
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (drbd_suspended(mdev)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		/* Peer was promoted while its disk is gone: start a new
		 * current UUID generation on our side. */
		if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
		    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
	    mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
			"demote", BM_LOCKED_TEST_ALLOWED);

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev, ns);

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev, ns);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev, ns);

	/* Entering Ahead mode (congestion policy): tell the peer. */
	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev, ns);

	/* We are in the progress to start a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(mdev,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh = EP_PASS_ON;
		int was_io_error = 0;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS.
		 * But it is still not safe to dereference ldev here, since
		 * we might come from a failed Attach before ldev was set. */
		eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");

		/* Immediately allow completion of all application IO,
		 * that waits for completion from the local disk,
		 * if this was a force-detach due to disk_timeout
		 * or administrator request (drbdsetup detach --force).
		 * Do NOT abort otherwise.
		 * Aborting local requests may cause serious problems,
		 * if requests are completed to upper layers already,
		 * and then later the already submitted local bio completes.
		 * This can cause DMA into former bio pages that meanwhile
		 * have been re-used for other things.
		 * So aborting local requests may cause crashes,
		 * or even worse, silent data corruption. */
		if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
			tl_abort_disk_io(mdev);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		/* NOTE(review): the dev_err() call opener around this
		 * assertion message is elided in this listing. */
		if (mdev->state.disk != D_FAILED)
			"ASSERT FAILED: disk is %s during detach\n",
			drbd_disk_str(mdev->state.disk));

		if (ns.conn >= C_CONNECTED)
			drbd_send_state(mdev, ns);

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		/* NOTE(review): the dev_err() call opener around this
		 * assertion message is elided in this listing. */
		if (mdev->state.disk != D_DISKLESS)
			"ASSERT FAILED: disk is %s while going diskless\n",
			drbd_disk_str(mdev->state.disk));

		if (ns.conn >= C_CONNECTED)
			drbd_send_state(mdev, ns);
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */

	/* Notify peer that I had a local IO error, and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev, ns);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync. Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev, ns);

	/* Verify finished, or reached stop sector. Peer did not know about
	 * the stop sector, and we may even have changed the stop sector during
	 * verify to interrupt/stop early. Send the new state. */
	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
	&& verify_can_do_stop_sector(mdev))
		drbd_send_state(mdev, ns);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase. */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);

	/* Device fully down (diskless, standalone, secondary):
	 * give paused sync peers a chance to resume. */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
/*
 * Deferred work item for connection-wide after-state-change handling:
 * filled in and queued by _conn_request_state(), consumed by
 * w_after_conn_state_ch().
 *
 * NOTE(review): this listing elides some members (the embedded work
 * struct and the old connection state "oc" read by the callback) and
 * the closing brace; confirm against the full source.
 */
struct after_conn_state_chg_work {
	union drbd_state ns_min;	/* new, min state, over all mdevs */
	union drbd_state ns_max;	/* new, max state, over all mdevs */
	enum chg_state_flags flags;
/*
 * w_after_conn_state_ch() - worker callback for connection-wide
 * after-state-change actions queued by _conn_request_state().
 *
 * Starts/tears down the receiver thread, drops the network config on
 * final disconnect, and resumes fencing-suspended I/O once the outdate
 * peer handler succeeded.  Drops the kref taken when the work was
 * queued.
 *
 * NOTE(review): this listing elides several lines (braces, the
 * synchronize_rcu()/kfree of old_conf, RCU locking around the idr
 * walk, the final return); comments describe only what is visible.
 */
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_tconn *tconn = w->tconn;
	enum drbd_conns oc = acscw->oc;
	union drbd_state ns_max = acscw->ns_max;
	struct drbd_conf *mdev;

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	/* Final disconnect: drop addresses, net_conf and crypto state. */
	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
		struct net_conf *old_conf;

		mutex_lock(&tconn->conf_update);
		old_conf = tconn->net_conf;
		tconn->my_addr_len = 0;
		tconn->peer_addr_len = 0;
		rcu_assign_pointer(tconn->net_conf, NULL);
		conn_free_crypto(tconn);
		mutex_unlock(&tconn->conf_update);

	if (ns_max.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (ns_max.pdsk <= D_OUTDATED) {
			/* The deferred new-current-UUID can be generated now
			 * for every volume that requested it. */
			idr_for_each_entry(&tconn->volumes, mdev, vnr) {
				if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
					drbd_uuid_new_current(mdev);
					clear_bit(NEW_CUR_UUID, &mdev->flags);
			/* Requeue what was pending, clear susp_fen. */
			spin_lock_irq(&tconn->req_lock);
			_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
			_conn_request_state(tconn,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
			spin_unlock_irq(&tconn->req_lock);

	/* Drop the reference taken in _conn_request_state(). */
	kref_put(&tconn->kref, &conn_destroy);

	conn_md_sync(tconn);
/*
 * conn_old_common_state() - compute the state fields common to all
 * volumes of a connection, and which CS_DC_* "state component changed"
 * flags can be kept.
 *
 * @pcs: out: the common (first volume's) state.
 * @pf:  out: flags, with CS_DC_* bits cleared for every state field
 *       that differs between volumes.
 *
 * Walks all volumes, comparing each volume's state against the first
 * one (cs); any differing field clears the corresponding CS_DC_* flag.
 *
 * NOTE(review): this listing elides parts of the cs initializer, the
 * per-volume os assignment, RCU locking and the stores through
 * pcs/pf; comments describe only what is visible.
 */
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
	enum chg_state_flags flags = ~0;	/* start with all CS_DC_* set */
	struct drbd_conf *mdev;
	int vnr, first_vol = 1;
	union drbd_dev_state os, cs = {
		{ .role = R_SECONDARY,
		  .conn = tconn->cstate,

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		/* Clear the "component unchanged" flag for every field
		 * on which this volume disagrees with the reference. */
		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
/*
 * conn_is_valid_transition() - check whether applying mask/val is a
 * valid state transition for every volume of the connection.
 *
 * For each volume, sanitize the prospective new state and run the
 * hard transition check; unless CS_HARD is set, also run the
 * per-state and soft-transition checks.  Returns the first failing
 * drbd_state_rv, or SS_SUCCESS.
 *
 * NOTE(review): this listing elides the braces, the CS_IGN_OUTD_FAIL
 * adjustment body, the else-branch between the two
 * is_valid_soft_transition() calls and the loop-break statements;
 * comments describe only what is visible.
 */
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_conf *mdev;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = drbd_read_state(mdev);
		ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);

		/* Failed disk -> outdated is tolerated when the caller
		 * asked to ignore outdate failures. */
		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)

		rv = is_valid_transition(os, ns);
		if (rv < SS_SUCCESS)

		if (!(flags & CS_HARD)) {
			rv = is_valid_state(mdev, ns);
			if (rv < SS_SUCCESS) {
				/* If the old state was already invalid for the
				 * same reason, judge only the transition. */
				if (is_valid_state(mdev, os) == rv)
					rv = is_valid_soft_transition(os, ns, tconn);
				rv = is_valid_soft_transition(os, ns, tconn);
			if (rv < SS_SUCCESS)

		if (rv < SS_SUCCESS && flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
/*
 * conn_set_state() - apply a mask/val state change to every volume of
 * the connection and report the element-wise min and max of the
 * resulting per-volume states.
 *
 * @pns_min: out: minimum of each state field over all volumes.
 * @pns_max: out: maximum of each state field over all volumes.
 *
 * Must be called with the connection state locked (called from
 * _conn_request_state() under req_lock).
 *
 * NOTE(review): this listing elides the return-type line, parts of
 * the ns_min initializer, the CS_IGN_OUTD_FAIL adjustment body, the
 * error-break after __drbd_set_state() and the final stores through
 * pns_min/pns_max; comments describe only what is visible.
 */
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
	union drbd_state ns, os, ns_max = { };
	union drbd_state ns_min = {
	struct drbd_conf *mdev;
	enum drbd_state_rv rv;
	int vnr, number_of_volumes = 0;

	if (mask.conn == C_MASK) {
		/* remember last connect time so request_timer_fn() won't
		 * kill newly established sessions while we are still trying to thaw
		 * previously frozen IO */
		if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
			tconn->last_reconnect_jif = jiffies;

		tconn->cstate = val.conn;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		number_of_volumes++;
		os = drbd_read_state(mdev);
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(mdev, ns, NULL);

		/* Tolerate failed-disk -> outdated when asked to. */
		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)

		rv = __drbd_set_state(mdev, ns, flags, NULL);
		if (rv < SS_SUCCESS)

		/* Re-read the state actually set, then fold it into the
		 * running field-wise min/max accumulators. */
		ns.i = mdev->state.i;
		ns_max.role = max_role(ns.role, ns_max.role);
		ns_max.peer = max_role(ns.peer, ns_max.peer);
		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);

		ns_min.role = min_role(ns.role, ns_min.role);
		ns_min.peer = min_role(ns.peer, ns_min.peer);
		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);

	/* No volumes at all: report a synthetic default state. */
	if (number_of_volumes == 0) {
		ns_min = ns_max = (union drbd_state) { {
				.role = R_SECONDARY,

	/* The susp* bits live on the connection, identical for min/max. */
	ns_min.susp = ns_max.susp = tconn->susp;
	ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
	ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
/*
 * _conn_rq_cond() - wait condition for a cluster-wide connection state
 * change request: decide whether waiting can stop.
 *
 * Returns SS_CW_SUCCESS / SS_CW_FAILED_BY_PEER once the peer's verdict
 * arrived (flag bits set by the receiver), an error if the transition
 * became locally invalid meanwhile, or SS_UNKNOWN_ERROR to keep
 * waiting while still in C_WF_REPORT_PARAMS.
 *
 * NOTE(review): the final "return rv" and closing brace are elided in
 * this listing; confirm against the full source.
 */
static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
	enum drbd_state_rv rv;

	/* Peer acknowledged the cluster-wide state change. */
	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
		return SS_CW_SUCCESS;

	/* Peer rejected the cluster-wide state change. */
	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = conn_is_valid_transition(tconn, mask, val, 0);
	if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS)
		rv = SS_UNKNOWN_ERROR; /* continue waiting */
/*
 * _conn_request_state() - request a connection-wide state change.
 *
 * Validates the transition, performs the cluster-wide handshake for a
 * graceful disconnect from C_WF_REPORT_PARAMS (dropping the spinlock
 * and taking cstate_mutex to send the request and wait for the peer's
 * verdict), applies the change to all volumes via conn_set_state(),
 * and queues the after-state-change work item.
 *
 * Called with tconn->req_lock held; the lock is temporarily dropped
 * for the cluster-wide negotiation and re-acquired before returning.
 *
 * NOTE(review): this listing elides the return-type line, the "goto
 * abort" error exits, the have_mutex bookkeeping, the "if (acscw)"
 * allocation-success guard, the abort labels and the final return;
 * comments describe only what is visible.
 */
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = tconn->cstate;
	union drbd_state ns_max, ns_min, os;
	bool have_mutex = false;

	rv = is_valid_conn_transition(oc, val.conn);
	if (rv < SS_SUCCESS)

	rv = conn_is_valid_transition(tconn, mask, val, flags);
	if (rv < SS_SUCCESS)

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {

		/* This will be a cluster-wide state change.
		 * Need to give up the spinlock, grab the mutex,
		 * then send the state change request, ... */
		spin_unlock_irq(&tconn->req_lock);
		mutex_lock(&tconn->cstate_mutex);

		set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
		if (conn_send_state_req(tconn, mask, val)) {
			/* sending failed. */
			clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
			rv = SS_CW_FAILED_BY_PEER;
			/* need to re-acquire the spin lock, though */
			goto abort_unlocked;

		if (val.conn == C_DISCONNECTING)
			set_bit(DISCONNECT_SENT, &tconn->flags);

		/* ... and re-acquire the spinlock.
		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
		 * conn_set_state() within the same spinlock. */
		spin_lock_irq(&tconn->req_lock);
		wait_event_lock_irq(tconn->ping_wait,
				(rv = _conn_rq_cond(tconn, mask, val)),
		clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags);
		if (rv < SS_SUCCESS)

	/* Determine the common old state and which CS_DC_* flags still
	 * apply, then set the new state on every volume. */
	conn_old_common_state(tconn, &os, &flags);
	flags |= CS_DC_SUSP;
	conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
	conn_pr_state_change(tconn, os, ns_max, flags);

	/* Queue the (sleeping) after-state-change work; GFP_ATOMIC
	 * because we still hold the req_lock. */
	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
		acscw->oc = os.conn;
		acscw->ns_min = ns_min;
		acscw->ns_max = ns_max;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		/* Reference dropped by w_after_conn_state_ch(). */
		kref_get(&tconn->kref);
		acscw->w.tconn = tconn;
		drbd_queue_work(&tconn->sender_work, &acscw->w);
		conn_err(tconn, "Could not kmalloc an acscw\n");

	/* mutex_unlock() "... must not be used in interrupt context.",
	 * so give up the spinlock, then re-acquire it */
	spin_unlock_irq(&tconn->req_lock);
	mutex_unlock(&tconn->cstate_mutex);
	spin_lock_irq(&tconn->req_lock);

	if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
		conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv));
		conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
		conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
1852 conn_request_state(struct drbd_tconn
*tconn
, union drbd_state mask
, union drbd_state val
,
1853 enum chg_state_flags flags
)
1855 enum drbd_state_rv rv
;
1857 spin_lock_irq(&tconn
->req_lock
);
1858 rv
= _conn_request_state(tconn
, mask
, val
, flags
);
1859 spin_unlock_irq(&tconn
->req_lock
);