drbd: Ignore the exit code of a fence-peer handler if it returns too late
drivers/block/drbd/drbd_nl.c
1/*
2 drbd_nl.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26#include <linux/module.h>
27#include <linux/drbd.h>
28#include <linux/in.h>
29#include <linux/fs.h>
30#include <linux/file.h>
31#include <linux/slab.h>
32#include <linux/blkpg.h>
33#include <linux/cpumask.h>
34#include "drbd_int.h"
35#include "drbd_req.h"
36#include "drbd_wrappers.h"
37#include <asm/unaligned.h>
38#include <linux/drbd_limits.h>
39#include <linux/kthread.h>
40
41#include <net/genetlink.h>
42
43/* .doit */
44// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
51int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
52int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74/* .dumpit */
75int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77#include <linux/drbd_genl_api.h>
78#include "drbd_nla.h"
79#include <linux/genl_magic_func.h>
80
81/* used by blkdev_get_by_path, to claim our meta data device(s) */
82static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
83
84/* Configuration is strictly serialized, because generic netlink message
85 * processing is strictly serialized by the genl_lock().
86 * Which means we can use one static global drbd_config_context struct.
87 */
88static struct drbd_config_context {
89 /* assigned from drbd_genlmsghdr */
90 unsigned int minor;
91 /* assigned from request attributes, if present */
92 unsigned int volume;
93#define VOLUME_UNSPECIFIED (-1U)
94 /* pointer into the request skb,
95 * limited lifetime! */
96 char *resource_name;
97 struct nlattr *my_addr;
98 struct nlattr *peer_addr;
99
100 /* reply buffer */
101 struct sk_buff *reply_skb;
102 /* pointer into reply buffer */
103 struct drbd_genlmsghdr *reply_dh;
104 /* resolved from attributes, if possible */
105 struct drbd_conf *mdev;
106 struct drbd_tconn *tconn;
107} adm_ctx;
108
109static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
110{
111 genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
112 if (genlmsg_reply(skb, info))
113 printk(KERN_ERR "drbd: error sending genl reply\n");
114}
115
116/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
117 * reason it could fail would be no space in the skb, and there are 4k available. */
118int drbd_msg_put_info(const char *info)
119{
120 struct sk_buff *skb = adm_ctx.reply_skb;
121 struct nlattr *nla;
122 int err = -EMSGSIZE;
123
124 if (!info || !info[0])
125 return 0;
126
127 nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
128 if (!nla)
129 return err;
130
131 err = nla_put_string(skb, T_info_text, info);
132 if (err) {
133 nla_nest_cancel(skb, nla);
134 return err;
135 } else
136 nla_nest_end(skb, nla);
137 return 0;
138}
139
140/* This would be a good candidate for a "pre_doit" hook,
141 * and per-family private info->pointers.
142 * But we need to stay compatible with older kernels.
143 * If it returns successfully, adm_ctx members are valid.
144 */
145#define DRBD_ADM_NEED_MINOR 1
146#define DRBD_ADM_NEED_RESOURCE 2
147#define DRBD_ADM_NEED_CONNECTION 4
148static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
149 unsigned flags)
150{
151 struct drbd_genlmsghdr *d_in = info->userhdr;
152 const u8 cmd = info->genlhdr->cmd;
153 int err;
154
155 memset(&adm_ctx, 0, sizeof(adm_ctx));
156
157 /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
158 if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
159 return -EPERM;
160
161 adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
162 if (!adm_ctx.reply_skb) {
163 err = -ENOMEM;
164 goto fail;
165 }
166
167 adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
168 info, &drbd_genl_family, 0, cmd);
169 /* putting a few bytes into a fresh skb of >= 4k will always succeed,
170 * but check anyway */
171 if (!adm_ctx.reply_dh) {
172 err = -ENOMEM;
173 goto fail;
174 }
175
176 adm_ctx.reply_dh->minor = d_in->minor;
177 adm_ctx.reply_dh->ret_code = NO_ERROR;
178
179 adm_ctx.volume = VOLUME_UNSPECIFIED;
180 if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
181 struct nlattr *nla;
182 /* parse and validate only */
183 err = drbd_cfg_context_from_attrs(NULL, info);
184 if (err)
185 goto fail;
186
187 /* It was present, and valid,
188 * copy it over to the reply skb. */
189 err = nla_put_nohdr(adm_ctx.reply_skb,
190 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
191 info->attrs[DRBD_NLA_CFG_CONTEXT]);
192 if (err)
193 goto fail;
194
195 /* and assign stuff to the global adm_ctx */
196 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
197 if (nla)
198 adm_ctx.volume = nla_get_u32(nla);
199 nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
200 if (nla)
201 adm_ctx.resource_name = nla_data(nla);
202 adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
203 adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
204 if ((adm_ctx.my_addr &&
205 nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
206 (adm_ctx.peer_addr &&
207 nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
208 err = -EINVAL;
209 goto fail;
210 }
211 }
212
213 adm_ctx.minor = d_in->minor;
214 adm_ctx.mdev = minor_to_mdev(d_in->minor);
215 adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
216
217 if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
218 drbd_msg_put_info("unknown minor");
219 return ERR_MINOR_INVALID;
220 }
221 if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
222 drbd_msg_put_info("unknown resource");
223 return ERR_INVALID_REQUEST;
224 }
225
226 if (flags & DRBD_ADM_NEED_CONNECTION) {
227 if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
228 drbd_msg_put_info("no resource name expected");
229 return ERR_INVALID_REQUEST;
230 }
231 if (adm_ctx.mdev) {
232 drbd_msg_put_info("no minor number expected");
233 return ERR_INVALID_REQUEST;
234 }
235 if (adm_ctx.my_addr && adm_ctx.peer_addr)
236 adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
237 nla_len(adm_ctx.my_addr),
238 nla_data(adm_ctx.peer_addr),
239 nla_len(adm_ctx.peer_addr));
240 if (!adm_ctx.tconn) {
241 drbd_msg_put_info("unknown connection");
242 return ERR_INVALID_REQUEST;
243 }
244 }
245
246 /* some more paranoia, if the request was over-determined */
247 if (adm_ctx.mdev && adm_ctx.tconn &&
248 adm_ctx.mdev->tconn != adm_ctx.tconn) {
249 pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
250 adm_ctx.minor, adm_ctx.resource_name,
251 adm_ctx.mdev->tconn->name);
252 drbd_msg_put_info("minor exists in different resource");
253 return ERR_INVALID_REQUEST;
254 }
255 if (adm_ctx.mdev &&
256 adm_ctx.volume != VOLUME_UNSPECIFIED &&
257 adm_ctx.volume != adm_ctx.mdev->vnr) {
258 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
259 adm_ctx.minor, adm_ctx.volume,
260 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
261 drbd_msg_put_info("minor exists as different volume");
262 return ERR_INVALID_REQUEST;
263 }
264
265 return NO_ERROR;
266
267fail:
268 nlmsg_free(adm_ctx.reply_skb);
269 adm_ctx.reply_skb = NULL;
270 return err;
271}
272
273static int drbd_adm_finish(struct genl_info *info, int retcode)
274{
275 if (adm_ctx.tconn) {
276 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
277 adm_ctx.tconn = NULL;
278 }
279
280 if (!adm_ctx.reply_skb)
281 return -ENOMEM;
282
283 adm_ctx.reply_dh->ret_code = retcode;
284 drbd_adm_send_reply(adm_ctx.reply_skb, info);
285 return 0;
286}
287
288static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
289{
290 char *afs;
291
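	/* envp[3] and envp[4] are the scratch buffers (char[20] and char[60])
	 * supplied by drbd_khelper() and conn_khelper() below; we fill them
	 * with DRBD_PEER_AF and DRBD_PEER_ADDRESS for the user space helper. */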
292 /* FIXME: A future version will not allow this case. */
293 if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
294 return;
295
296 switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
297 case AF_INET6:
298 afs = "ipv6";
299 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
300 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
301 break;
302 case AF_INET:
303 afs = "ipv4";
304 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
305 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
306 break;
307 default:
308 afs = "ssocks";
309 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
310 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
311 }
312 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
313}
314
315int drbd_khelper(struct drbd_conf *mdev, char *cmd)
316{
317 char *envp[] = { "HOME=/",
318 "TERM=linux",
319 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
320 (char[20]) { }, /* address family */
321 (char[60]) { }, /* address */
322 NULL };
323 char mb[12];
324 char *argv[] = {usermode_helper, cmd, mb, NULL };
325 struct drbd_tconn *tconn = mdev->tconn;
326 struct sib_info sib;
327 int ret;
328
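	/* While the worker thread itself runs the helper, note that a user
	 * space callback is pending (the bit is cleared again once the
	 * helper returns). */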
329 if (current == tconn->worker.task)
330 set_bit(CALLBACK_PENDING, &tconn->flags);
331
332 snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
333 setup_khelper_env(tconn, envp);
334
335 /* The helper may take some time.
336 * write out any unsynced meta data changes now */
337 drbd_md_sync(mdev);
338
339 dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
340 sib.sib_reason = SIB_HELPER_PRE;
341 sib.helper_name = cmd;
342 drbd_bcast_event(mdev, &sib);
343 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
344 if (ret)
345 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
346 usermode_helper, cmd, mb,
347 (ret >> 8) & 0xff, ret);
348 else
349 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
350 usermode_helper, cmd, mb,
351 (ret >> 8) & 0xff, ret);
352 sib.sib_reason = SIB_HELPER_POST;
353 sib.helper_exit_code = ret;
354 drbd_bcast_event(mdev, &sib);
355
356 if (current == tconn->worker.task)
357 clear_bit(CALLBACK_PENDING, &tconn->flags);
358
359 if (ret < 0) /* Ignore any ERRNOs we got. */
360 ret = 0;
361
362 return ret;
363}
364
365int conn_khelper(struct drbd_tconn *tconn, char *cmd)
366{
367 char *envp[] = { "HOME=/",
368 "TERM=linux",
369 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
370 (char[20]) { }, /* address family */
371 (char[60]) { }, /* address */
372 NULL };
373 char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
374 int ret;
375
376 setup_khelper_env(tconn, envp);
377 conn_md_sync(tconn);
378
379 conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
380 /* TODO: conn_bcast_event() ?? */
381
382 ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
383 if (ret)
384 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
385 usermode_helper, cmd, tconn->name,
386 (ret >> 8) & 0xff, ret);
387 else
388 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
389 usermode_helper, cmd, tconn->name,
390 (ret >> 8) & 0xff, ret);
391 /* TODO: conn_bcast_event() ?? */
392
393 if (ret < 0) /* Ignore any ERRNOs we got. */
394 ret = 0;
395
396 return ret;
397}
398
399static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
400{
401 enum drbd_fencing_p fp = FP_NOT_AVAIL;
402 struct drbd_conf *mdev;
403 int vnr;
404
405 rcu_read_lock();
406 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
407 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
408 fp = max_t(enum drbd_fencing_p, fp,
409 rcu_dereference(mdev->ldev->disk_conf)->fencing);
410 put_ldev(mdev);
411 }
412 }
413 rcu_read_unlock();
414
415 return fp;
416}
417
418bool conn_try_outdate_peer(struct drbd_tconn *tconn)
419{
420 unsigned int connect_cnt;
421 union drbd_state mask = { };
422 union drbd_state val = { };
423 enum drbd_fencing_p fp;
424 char *ex_to_string;
425 int r;
426
427 if (tconn->cstate >= C_WF_REPORT_PARAMS) {
428 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
429 return false;
430 }
431
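	/* Remember the connection generation before invoking the handler:
	 * if a new connection is established while the fence-peer handler
	 * runs, its exit code must be ignored (checked again under req_lock
	 * below). */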
432 spin_lock_irq(&tconn->req_lock);
433 connect_cnt = tconn->connect_cnt;
434 spin_unlock_irq(&tconn->req_lock);
435
436 fp = highest_fencing_policy(tconn);
437 switch (fp) {
438 case FP_NOT_AVAIL:
439 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
440 goto out;
441 case FP_DONT_CARE:
442 return true;
443 default: ;
444 }
445
446 r = conn_khelper(tconn, "fence-peer");
447
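	/* With UMH_WAIT_PROC, conn_khelper() returns a wait(2)-style status:
	 * the handler's exit code is in bits 15..8, hence the (r>>8) & 0xff. */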
448 switch ((r>>8) & 0xff) {
449 case 3: /* peer is inconsistent */
450 ex_to_string = "peer is inconsistent or worse";
451 mask.pdsk = D_MASK;
452 val.pdsk = D_INCONSISTENT;
453 break;
454 case 4: /* peer got outdated, or was already outdated */
455 ex_to_string = "peer was fenced";
456 mask.pdsk = D_MASK;
457 val.pdsk = D_OUTDATED;
458 break;
459 case 5: /* peer was down */
460 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
461 /* we will(have) create(d) a new UUID anyway... */
462 ex_to_string = "peer is unreachable, assumed to be dead";
463 mask.pdsk = D_MASK;
464 val.pdsk = D_OUTDATED;
465 } else {
466 ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
467 }
468 break;
469 case 6: /* Peer is primary, voluntarily outdate myself.
470 * This is useful when an unconnected R_SECONDARY is asked to
471 * become R_PRIMARY, but finds the other peer being active. */
472 ex_to_string = "peer is active";
473 conn_warn(tconn, "Peer is primary, outdating myself.\n");
474 mask.disk = D_MASK;
475 val.disk = D_OUTDATED;
476 break;
477 case 7:
478 if (fp != FP_STONITH)
479 conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
480 ex_to_string = "peer was stonithed";
481 mask.pdsk = D_MASK;
482 val.pdsk = D_OUTDATED;
483 break;
484 default:
485 /* The script is broken ... */
486 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
487 return false; /* Eventually leave IO frozen */
488 }
489
490 conn_info(tconn, "fence-peer helper returned %d (%s)\n",
491 (r>>8) & 0xff, ex_to_string);
492
493 out:
494
495 /* Not using
496 conn_request_state(tconn, mask, val, CS_VERBOSE);
497 here, because we might have been able to re-establish the connection in the
498 meantime. */
499 spin_lock_irq(&tconn->req_lock);
500 if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
501 if (tconn->connect_cnt != connect_cnt)
502 /* In case the connection was established and dropped
503 while the fence-peer handler was running, ignore its exit code */
504 conn_info(tconn, "Ignoring fence-peer exit code\n");
505 else
506 _conn_request_state(tconn, mask, val, CS_VERBOSE);
507 }
508 spin_unlock_irq(&tconn->req_lock);
509
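	/* Fencing is considered successful if the peer's disk is now known
	 * to be Outdated or worse. */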
510 return conn_highest_pdsk(tconn) <= D_OUTDATED;
511}
512
513static int _try_outdate_peer_async(void *data)
514{
515 struct drbd_tconn *tconn = (struct drbd_tconn *)data;
516
517 conn_try_outdate_peer(tconn);
518
519 kref_put(&tconn->kref, &conn_destroy);
520 return 0;
521}
522
523void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
524{
525 struct task_struct *opa;
526
527 kref_get(&tconn->kref);
528 opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
529 if (IS_ERR(opa)) {
530 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
531 kref_put(&tconn->kref, &conn_destroy);
532 }
533}
534
535enum drbd_state_rv
536drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
537{
538 const int max_tries = 4;
539 enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
540 struct net_conf *nc;
541 int try = 0;
542 int forced = 0;
543 union drbd_state mask, val;
544
545 if (new_role == R_PRIMARY)
546 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
547
548 mutex_lock(mdev->state_mutex);
549
550 mask.i = 0; mask.role = R_MASK;
551 val.i = 0; val.role = new_role;
552
553 while (try++ < max_tries) {
554 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
555
556 /* in case we first succeeded in outdating the peer,
557 * but now suddenly could establish a connection */
558 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
559 val.pdsk = 0;
560 mask.pdsk = 0;
561 continue;
562 }
563
564 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
565 (mdev->state.disk < D_UP_TO_DATE &&
566 mdev->state.disk >= D_INCONSISTENT)) {
567 mask.disk = D_MASK;
568 val.disk = D_UP_TO_DATE;
569 forced = 1;
570 continue;
571 }
572
573 if (rv == SS_NO_UP_TO_DATE_DISK &&
574 mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
575 D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
576
577 if (conn_try_outdate_peer(mdev->tconn)) {
578 val.disk = D_UP_TO_DATE;
579 mask.disk = D_MASK;
580 }
581 continue;
582 }
583
584 if (rv == SS_NOTHING_TO_DO)
585 goto out;
586 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
587 if (!conn_try_outdate_peer(mdev->tconn) && force) {
588 dev_warn(DEV, "Forced into split brain situation!\n");
589 mask.pdsk = D_MASK;
590 val.pdsk = D_OUTDATED;
591
592 }
593 continue;
594 }
595 if (rv == SS_TWO_PRIMARIES) {
596 /* Maybe the peer is detected as dead very soon...
597 retry at most once more in this case. */
598 int timeo;
599 rcu_read_lock();
600 nc = rcu_dereference(mdev->tconn->net_conf);
601 timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
602 rcu_read_unlock();
603 schedule_timeout_interruptible(timeo);
604 if (try < max_tries)
605 try = max_tries - 1;
606 continue;
607 }
608 if (rv < SS_SUCCESS) {
609 rv = _drbd_request_state(mdev, mask, val,
610 CS_VERBOSE + CS_WAIT_COMPLETE);
611 if (rv < SS_SUCCESS)
612 goto out;
613 }
614 break;
615 }
616
617 if (rv < SS_SUCCESS)
618 goto out;
619
620 if (forced)
621 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
622
623 /* Wait until nothing is on the fly :) */
624 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
625
626 /* FIXME also wait for all pending P_BARRIER_ACK? */
627
628 if (new_role == R_SECONDARY) {
629 set_disk_ro(mdev->vdisk, true);
630 if (get_ldev(mdev)) {
631 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
632 put_ldev(mdev);
633 }
634 } else {
635 mutex_lock(&mdev->tconn->conf_update);
636 nc = mdev->tconn->net_conf;
637 if (nc)
638 nc->discard_my_data = 0; /* without copy; single bit op is atomic */
639 mutex_unlock(&mdev->tconn->conf_update);
640
641 set_disk_ro(mdev->vdisk, false);
642 if (get_ldev(mdev)) {
643 if (((mdev->state.conn < C_CONNECTED ||
644 mdev->state.pdsk <= D_FAILED)
645 && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
646 drbd_uuid_new_current(mdev);
647
648 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
649 put_ldev(mdev);
650 }
651 }
652
653 /* writeout of activity log covered areas of the bitmap
654 * to stable storage is done in the after-state-change work already */
655
656 if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
657 /* if this was forced, we should consider sync */
658 if (forced)
659 drbd_send_uuids(mdev);
660 drbd_send_current_state(mdev);
661 }
662
663 drbd_md_sync(mdev);
664
665 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
666out:
667 mutex_unlock(mdev->state_mutex);
668 return rv;
669}
670
671static const char *from_attrs_err_to_txt(int err)
672{
673 return err == -ENOMSG ? "required attribute missing" :
674 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
675 err == -EEXIST ? "can not change invariant setting" :
676 "invalid attribute value";
677}
678
679int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
680{
681 struct set_role_parms parms;
682 int err;
683 enum drbd_ret_code retcode;
684
685 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
686 if (!adm_ctx.reply_skb)
687 return retcode;
688 if (retcode != NO_ERROR)
689 goto out;
690
691 memset(&parms, 0, sizeof(parms));
692 if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
693 err = set_role_parms_from_attrs(&parms, info);
694 if (err) {
695 retcode = ERR_MANDATORY_TAG;
696 drbd_msg_put_info(from_attrs_err_to_txt(err));
697 goto out;
698 }
699 }
700
701 if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
702 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
703 else
704 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
705out:
706 drbd_adm_finish(info, retcode);
707 return 0;
708}
709
710/* Initializes the md.*_offset members, so we are able to find
711 * the on disk meta data.
712 *
713 * We currently have two possible layouts:
714 * external:
715 * |----------- md_size_sect ------------------|
716 * [ 4k superblock ][ activity log ][ Bitmap ]
717 * | al_offset == 8 |
718 * | bm_offset = al_offset + X |
719 * ==> bitmap sectors = md_size_sect - bm_offset
720 *
721 * internal:
722 * |----------- md_size_sect ------------------|
723 * [data.....][ Bitmap ][ activity log ][ 4k superblock ]
724 * | al_offset < 0 |
725 * | bm_offset = al_offset - Y |
726 * ==> bitmap sectors = Y = al_offset - bm_offset
727 *
728 * Activity log size used to be fixed 32kB,
729 * but is about to become configurable.
730 */
731static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
732 struct drbd_backing_dev *bdev)
733{
734 sector_t md_size_sect = 0;
735 unsigned int al_size_sect = bdev->md.al_size_4k * 8;
736
737 bdev->md.md_offset = drbd_md_ss(bdev);
738
739 switch (bdev->md.meta_dev_idx) {
740 default:
741 /* v07 style fixed size indexed meta data */
742 bdev->md.md_size_sect = MD_128MB_SECT;
743 bdev->md.al_offset = MD_4kB_SECT;
744 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
745 break;
746 case DRBD_MD_INDEX_FLEX_EXT:
747 /* just occupy the full device; unit: sectors */
748 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
749 bdev->md.al_offset = MD_4kB_SECT;
750 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
751 break;
752 case DRBD_MD_INDEX_INTERNAL:
753 case DRBD_MD_INDEX_FLEX_INT:
754 /* al size is still fixed */
755 bdev->md.al_offset = -al_size_sect;
756 /* we need (slightly less than) ~ this many bitmap sectors: */
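		/* A minimal sketch of the math, assuming 4 kiB of storage per
		 * bitmap bit: one 512-byte bitmap sector then covers one
		 * 16 MiB bitmap extent, so rounding the device size up to
		 * full extents and counting them yields the number of bitmap
		 * sectors, which is finally aligned to 8 sectors (4 kiB). */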
757 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
758 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
759 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
760 md_size_sect = ALIGN(md_size_sect, 8);
761
762 /* plus the "drbd meta data super block",
763 * and the activity log; */
764 md_size_sect += MD_4kB_SECT + al_size_sect;
765
766 bdev->md.md_size_sect = md_size_sect;
767 /* bitmap offset is adjusted by 'super' block size */
768 bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
769 break;
770 }
771}
772
773/* input size is expected to be in KB */
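/* for example, ppsize(buf, 1048576) yields "1024 MB" */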
774char *ppsize(char *buf, unsigned long long size)
775{
776 /* Needs 9 bytes at max including trailing NUL:
777 * -1ULL ==> "16384 EB" */
778 static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
779 int base = 0;
780 while (size >= 10000 && base < sizeof(units)-1) {
781 /* shift + round */
782 size = (size >> 10) + !!(size & (1<<9));
783 base++;
784 }
785 sprintf(buf, "%u %cB", (unsigned)size, units[base]);
786
787 return buf;
788}
789
790/* there is still a theoretical deadlock when called from receiver
791 * on an D_INCONSISTENT R_PRIMARY:
792 * remote READ does inc_ap_bio, receiver would need to receive answer
793 * packet from remote to dec_ap_bio again.
794 * receiver receive_sizes(), comes here,
795 * waits for ap_bio_cnt == 0. -> deadlock.
796 * but this cannot happen, actually, because:
797 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
798 * (not connected, or bad/no disk on peer):
799 * see drbd_fail_request_early, ap_bio_cnt is zero.
800 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
801 * peer may not initiate a resize.
802 */
803/* Note these are not to be confused with
804 * drbd_adm_suspend_io/drbd_adm_resume_io,
805 * which are (sub) state changes triggered by admin (drbdsetup),
806 * and can be long lived.
807 * This changes an mdev->flag, is triggered by drbd internals,
808 * and should be short-lived. */
809void drbd_suspend_io(struct drbd_conf *mdev)
810{
811 set_bit(SUSPEND_IO, &mdev->flags);
812 if (drbd_suspended(mdev))
813 return;
814 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
815}
816
817void drbd_resume_io(struct drbd_conf *mdev)
818{
819 clear_bit(SUSPEND_IO, &mdev->flags);
820 wake_up(&mdev->misc_wait);
821}
822
823/**
824 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
825 * @mdev: DRBD device.
826 *
827 * Returns an enum determine_dev_size; dev_size_error indicates an error.
828 * You should call drbd_md_sync() after calling this function.
829 */
830enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
831{
832 sector_t prev_first_sect, prev_size; /* previous meta location */
833 sector_t la_size_sect, u_size;
834 sector_t size;
835 char ppb[10];
836
837 int md_moved, la_size_changed;
838 enum determine_dev_size rv = unchanged;
839
840 /* race:
841 * application request passes inc_ap_bio,
842 * but then cannot get an AL-reference.
843 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
844 *
845 * to avoid that:
846 * Suspend IO right here.
847 * still lock the act_log to not trigger ASSERTs there.
848 */
849 drbd_suspend_io(mdev);
850
851 /* no wait necessary anymore, actually we could assert that */
852 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
853
854 prev_first_sect = drbd_md_first_sector(mdev->ldev);
855 prev_size = mdev->ldev->md.md_size_sect;
856 la_size_sect = mdev->ldev->md.la_size_sect;
857
858 /* TODO: should only be some assert here, not (re)init... */
859 drbd_md_set_sector_offsets(mdev, mdev->ldev);
860
861 rcu_read_lock();
862 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
863 rcu_read_unlock();
864 size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
865
866 if (drbd_get_capacity(mdev->this_bdev) != size ||
867 drbd_bm_capacity(mdev) != size) {
868 int err;
869 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
870 if (unlikely(err)) {
871 /* currently there is only one error: ENOMEM! */
872 size = drbd_bm_capacity(mdev)>>1;
873 if (size == 0) {
874 dev_err(DEV, "OUT OF MEMORY! "
875 "Could not allocate bitmap!\n");
876 } else {
877 dev_err(DEV, "BM resizing failed. "
878 "Leaving size unchanged at size = %lu KB\n",
879 (unsigned long)size);
880 }
881 rv = dev_size_error;
882 }
883 /* racy, see comments above. */
884 drbd_set_my_capacity(mdev, size);
885 mdev->ldev->md.la_size_sect = size;
886 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
887 (unsigned long long)size>>1);
888 }
889 if (rv == dev_size_error)
890 goto out;
891
892 la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
893
894 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
895 || prev_size != mdev->ldev->md.md_size_sect;
896
897 if (la_size_changed || md_moved) {
898 int err;
899
900 drbd_al_shrink(mdev); /* All extents inactive. */
901 dev_info(DEV, "Writing the whole bitmap, %s\n",
902 la_size_changed && md_moved ? "size changed and md moved" :
903 la_size_changed ? "size changed" : "md moved");
904 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
905 err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
906 "size changed", BM_LOCKED_MASK);
907 if (err) {
908 rv = dev_size_error;
909 goto out;
910 }
911 drbd_md_mark_dirty(mdev);
912 }
913
914 if (size > la_size_sect)
915 rv = grew;
916 if (size < la_size_sect)
917 rv = shrunk;
918out:
919 lc_unlock(mdev->act_log);
920 wake_up(&mdev->al_wait);
921 drbd_resume_io(mdev);
922
923 return rv;
924}
925
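/* Size selection policy: if both the local and the peer size are known, take
 * the smaller of the two; otherwise fall back to the last agreed size (bounded
 * by whatever single size is known), or to the one known size. A user-requested
 * size (u_size) is applied only if it does not exceed that result. */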
926sector_t
927drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
928 sector_t u_size, int assume_peer_has_space)
929{
930 sector_t p_size = mdev->p_size; /* partner's disk size. */
931 sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
932 sector_t m_size; /* my size */
933 sector_t size = 0;
934
935 m_size = drbd_get_max_capacity(bdev);
936
937 if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
938 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
939 p_size = m_size;
940 }
941
942 if (p_size && m_size) {
943 size = min_t(sector_t, p_size, m_size);
944 } else {
945 if (la_size_sect) {
946 size = la_size_sect;
947 if (m_size && m_size < size)
948 size = m_size;
949 if (p_size && p_size < size)
950 size = p_size;
951 } else {
952 if (m_size)
953 size = m_size;
954 if (p_size)
955 size = p_size;
956 }
957 }
958
959 if (size == 0)
960 dev_err(DEV, "Both nodes diskless!\n");
961
962 if (u_size) {
963 if (u_size > size)
964 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
965 (unsigned long)u_size>>1, (unsigned long)size>>1);
966 else
967 size = u_size;
968 }
969
970 return size;
971}
972
973/**
974 * drbd_check_al_size() - Ensures that the AL is of the right size
975 * @mdev: DRBD device.
976 *
977 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
978 * failed, and 0 on success. You should call drbd_md_sync() after you called
979 * this function.
980 */
981static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
982{
983 struct lru_cache *n, *t;
984 struct lc_element *e;
985 unsigned int in_use;
986 int i;
987
988 if (mdev->act_log &&
989 mdev->act_log->nr_elements == dc->al_extents)
990 return 0;
991
992 in_use = 0;
993 t = mdev->act_log;
994 n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
995 dc->al_extents, sizeof(struct lc_element), 0);
996
997 if (n == NULL) {
998 dev_err(DEV, "Cannot allocate act_log lru!\n");
999 return -ENOMEM;
1000 }
1001 spin_lock_irq(&mdev->al_lock);
1002 if (t) {
1003 for (i = 0; i < t->nr_elements; i++) {
1004 e = lc_element_by_index(t, i);
1005 if (e->refcnt)
1006 dev_err(DEV, "refcnt(%d)==%d\n",
1007 e->lc_number, e->refcnt);
1008 in_use += e->refcnt;
1009 }
1010 }
1011 if (!in_use)
1012 mdev->act_log = n;
1013 spin_unlock_irq(&mdev->al_lock);
1014 if (in_use) {
1015 dev_err(DEV, "Activity log still in use!\n");
1016 lc_destroy(n);
1017 return -EBUSY;
1018 } else {
1019 if (t)
1020 lc_destroy(t);
1021 }
1022 drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
1023 return 0;
1024}
1025
1026static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
1027{
1028 struct request_queue * const q = mdev->rq_queue;
1029 unsigned int max_hw_sectors = max_bio_size >> 9;
1030 unsigned int max_segments = 0;
1031
1032 if (get_ldev_if_state(mdev, D_ATTACHING)) {
1033 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
1034
1035 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
1036 rcu_read_lock();
1037 max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
1038 rcu_read_unlock();
1039 put_ldev(mdev);
1040 }
1041
1042 blk_queue_logical_block_size(q, 512);
1043 blk_queue_max_hw_sectors(q, max_hw_sectors);
1044 /* This is the workaround for "bio would need to, but cannot, be split" */
1045 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1046 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
1047
1048 if (get_ldev_if_state(mdev, D_ATTACHING)) {
1049 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
1050
1051 blk_queue_stack_limits(q, b);
1052
1053 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
1054 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1055 q->backing_dev_info.ra_pages,
1056 b->backing_dev_info.ra_pages);
1057 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
1058 }
1059 put_ldev(mdev);
1060 }
1061}
1062
1063void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
1064{
1065 unsigned int now, new, local, peer;
1066
1067 now = queue_max_hw_sectors(mdev->rq_queue) << 9;
1068 local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
1069 peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
1070
1071 if (get_ldev_if_state(mdev, D_ATTACHING)) {
1072 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
1073 mdev->local_max_bio_size = local;
1074 put_ldev(mdev);
1075 }
1076 local = min(local, DRBD_MAX_BIO_SIZE);
1077
1078 /* We may ignore peer limits if the peer is modern enough:
1079 from 8.3.8 onwards the peer can use multiple
1080 BIOs for a single peer_request */
1081 if (mdev->state.conn >= C_CONNECTED) {
1082 if (mdev->tconn->agreed_pro_version < 94)
1083 peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1084 /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1085 else if (mdev->tconn->agreed_pro_version == 94)
1086 peer = DRBD_MAX_SIZE_H80_PACKET;
1087 else if (mdev->tconn->agreed_pro_version < 100)
1088 peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
1089 else
1090 peer = DRBD_MAX_BIO_SIZE;
1091 }
1092
1093 new = min(local, peer);
1094
1095 if (mdev->state.role == R_PRIMARY && new < now)
1096 dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1097
1098 if (new != now)
1099 dev_info(DEV, "max BIO size = %u\n", new);
1100
1101 drbd_setup_queue_param(mdev, new);
1102}
1103
1104/* Starts the worker thread */
1105static void conn_reconfig_start(struct drbd_tconn *tconn)
1106{
1107 drbd_thread_start(&tconn->worker);
1108 conn_flush_workqueue(tconn);
1109}
1110
1111/* if still unconfigured, stops worker again. */
1112static void conn_reconfig_done(struct drbd_tconn *tconn)
1113{
1114 bool stop_threads;
1115 spin_lock_irq(&tconn->req_lock);
1116 stop_threads = conn_all_vols_unconf(tconn) &&
1117 tconn->cstate == C_STANDALONE;
1118 spin_unlock_irq(&tconn->req_lock);
1119 if (stop_threads) {
1120 /* asender is implicitly stopped by receiver
1121 * in conn_disconnect() */
1122 drbd_thread_stop(&tconn->receiver);
1123 drbd_thread_stop(&tconn->worker);
1124 }
1125}
1126
1127/* Make sure IO is suspended before calling this function. */
1128static void drbd_suspend_al(struct drbd_conf *mdev)
1129{
1130 int s = 0;
1131
1132 if (!lc_try_lock(mdev->act_log)) {
1133 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1134 return;
1135 }
1136
1137 drbd_al_shrink(mdev);
1138 spin_lock_irq(&mdev->tconn->req_lock);
1139 if (mdev->state.conn < C_CONNECTED)
1140 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1141 spin_unlock_irq(&mdev->tconn->req_lock);
1142 lc_unlock(mdev->act_log);
1143
1144 if (s)
1145 dev_info(DEV, "Suspended AL updates\n");
1146}
1147
1148
1149static bool should_set_defaults(struct genl_info *info)
1150{
1151 unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1152 return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1153}
1154
1155static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
1156{
1157 /* This is limited by 16 bit "slot" numbers,
1158 * and by available on-disk context storage.
1159 *
1160 * Also (u16)~0 is special (denotes a "free" extent).
1161 *
1162 * One transaction occupies one 4kB on-disk block,
1163 * we have n such blocks in the on disk ring buffer,
1164 * the "current" transaction may fail (n-1),
1165 * and there are 919 slots of context information per transaction.
1166 *
1167 * 72 transaction blocks amounts to more than 2**16 context slots,
1168 * so cap there first.
1169 */
1170 const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
1171 const unsigned int sufficient_on_disk =
1172 (max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
1173 /AL_CONTEXT_PER_TRANSACTION;
1174
1175 unsigned int al_size_4k = bdev->md.al_size_4k;
1176
1177 if (al_size_4k > sufficient_on_disk)
1178 return max_al_nr;
1179
1180 return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
1181}
1182
1183int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1184{
1185 enum drbd_ret_code retcode;
1186 struct drbd_conf *mdev;
1187 struct disk_conf *new_disk_conf, *old_disk_conf;
1188 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1189 int err, fifo_size;
1190
1191 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1192 if (!adm_ctx.reply_skb)
1193 return retcode;
1194 if (retcode != NO_ERROR)
1195 goto out;
1196
1197 mdev = adm_ctx.mdev;
1198
1199 /* we also need a disk
1200 * to change the options on */
1201 if (!get_ldev(mdev)) {
1202 retcode = ERR_NO_DISK;
1203 goto out;
1204 }
1205
1206 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1207 if (!new_disk_conf) {
1208 retcode = ERR_NOMEM;
1209 goto fail;
1210 }
1211
1212 mutex_lock(&mdev->tconn->conf_update);
1213 old_disk_conf = mdev->ldev->disk_conf;
1214 *new_disk_conf = *old_disk_conf;
1215 if (should_set_defaults(info))
1216 set_disk_conf_defaults(new_disk_conf);
1217
1218 err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1219 if (err && err != -ENOMSG) {
1220 retcode = ERR_MANDATORY_TAG;
1221 drbd_msg_put_info(from_attrs_err_to_txt(err));
1222 }
1223
1224 if (!expect(new_disk_conf->resync_rate >= 1))
1225 new_disk_conf->resync_rate = 1;
1226
1227 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1228 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1229 if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
1230 new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
1231
1232 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1233 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1234
1235 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1236 if (fifo_size != mdev->rs_plan_s->size) {
1237 new_plan = fifo_alloc(fifo_size);
1238 if (!new_plan) {
1239 dev_err(DEV, "kmalloc of fifo_buffer failed");
1240 retcode = ERR_NOMEM;
1241 goto fail_unlock;
1242 }
1243 }
1244
1245 drbd_suspend_io(mdev);
1246 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1247 drbd_al_shrink(mdev);
1248 err = drbd_check_al_size(mdev, new_disk_conf);
1249 lc_unlock(mdev->act_log);
1250 wake_up(&mdev->al_wait);
1251 drbd_resume_io(mdev);
1252
1253 if (err) {
1254 retcode = ERR_NOMEM;
1255 goto fail_unlock;
1256 }
1257
1258 write_lock_irq(&global_state_lock);
1259 retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
1260 if (retcode == NO_ERROR) {
1261 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
1262 drbd_resync_after_changed(mdev);
1263 }
1264 write_unlock_irq(&global_state_lock);
1265
1266 if (retcode != NO_ERROR)
1267 goto fail_unlock;
1268
1269 if (new_plan) {
1270 old_plan = mdev->rs_plan_s;
1271 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
1272 }
1273
1274 mutex_unlock(&mdev->tconn->conf_update);
1275
1276 if (new_disk_conf->al_updates)
1277 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1278 else
1279 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1280
1281 if (new_disk_conf->md_flushes)
1282 clear_bit(MD_NO_FUA, &mdev->flags);
1283 else
1284 set_bit(MD_NO_FUA, &mdev->flags);
1285
1286 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
1287
1288 drbd_md_sync(mdev);
1289
1290 if (mdev->state.conn >= C_CONNECTED)
1291 drbd_send_sync_param(mdev);
1292
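	/* Wait for any RCU readers still using the old disk_conf or resync
	 * plan before freeing them. */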
1293 synchronize_rcu();
1294 kfree(old_disk_conf);
1295 kfree(old_plan);
1296 mod_timer(&mdev->request_timer, jiffies + HZ);
1297 goto success;
1298
1299fail_unlock:
1300 mutex_unlock(&mdev->tconn->conf_update);
1301 fail:
1302 kfree(new_disk_conf);
1303 kfree(new_plan);
1304success:
1305 put_ldev(mdev);
1306 out:
1307 drbd_adm_finish(info, retcode);
1308 return 0;
1309}
1310
1311int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1312{
1313 struct drbd_conf *mdev;
1314 int err;
1315 enum drbd_ret_code retcode;
1316 enum determine_dev_size dd;
1317 sector_t max_possible_sectors;
1318 sector_t min_md_device_sectors;
1319 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1320 struct disk_conf *new_disk_conf = NULL;
1321 struct block_device *bdev;
1322 struct lru_cache *resync_lru = NULL;
1323 struct fifo_buffer *new_plan = NULL;
1324 union drbd_state ns, os;
1325 enum drbd_state_rv rv;
1326 struct net_conf *nc;
1327
1328 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1329 if (!adm_ctx.reply_skb)
1330 return retcode;
1331 if (retcode != NO_ERROR)
1332 goto finish;
1333
1334 mdev = adm_ctx.mdev;
1335 conn_reconfig_start(mdev->tconn);
1336
1337 /* if you want to reconfigure, please tear down first */
1338 if (mdev->state.disk > D_DISKLESS) {
1339 retcode = ERR_DISK_CONFIGURED;
1340 goto fail;
1341 }
1342 /* It may just now have detached because of IO error. Make sure
1343 * drbd_ldev_destroy is done already, we may end up here very fast,
1344 * e.g. if someone calls attach from the on-io-error handler,
1345 * to realize a "hot spare" feature (not that I'd recommend that) */
1346 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1347
1348 /* make sure there is no leftover from previous force-detach attempts */
1349 clear_bit(FORCE_DETACH, &mdev->flags);
1350 clear_bit(WAS_IO_ERROR, &mdev->flags);
1351 clear_bit(WAS_READ_ERROR, &mdev->flags);
1352
1353 /* and no leftover from previously aborted resync or verify, either */
1354 mdev->rs_total = 0;
1355 mdev->rs_failed = 0;
1356 atomic_set(&mdev->rs_pending_cnt, 0);
1357
1358 /* allocation not in the IO path, drbdsetup context */
1359 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1360 if (!nbc) {
1361 retcode = ERR_NOMEM;
1362 goto fail;
1363 }
1364 spin_lock_init(&nbc->md.uuid_lock);
1365
1366 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1367 if (!new_disk_conf) {
1368 retcode = ERR_NOMEM;
1369 goto fail;
1370 }
1371 nbc->disk_conf = new_disk_conf;
1372
1373 set_disk_conf_defaults(new_disk_conf);
1374 err = disk_conf_from_attrs(new_disk_conf, info);
1375 if (err) {
1376 retcode = ERR_MANDATORY_TAG;
1377 drbd_msg_put_info(from_attrs_err_to_txt(err));
1378 goto fail;
1379 }
1380
1381 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1382 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1383
1384 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1385 if (!new_plan) {
1386 retcode = ERR_NOMEM;
1387 goto fail;
1388 }
1389
1390 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1391 retcode = ERR_MD_IDX_INVALID;
1392 goto fail;
1393 }
1394
1395 write_lock_irq(&global_state_lock);
1396 retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
1397 write_unlock_irq(&global_state_lock);
1398 if (retcode != NO_ERROR)
1399 goto fail;
1400
1401 rcu_read_lock();
1402 nc = rcu_dereference(mdev->tconn->net_conf);
1403 if (nc) {
1404 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1405 rcu_read_unlock();
1406 retcode = ERR_STONITH_AND_PROT_A;
1407 goto fail;
1408 }
1409 }
1410 rcu_read_unlock();
1411
1412 bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
1413 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1414 if (IS_ERR(bdev)) {
1415 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
1416 PTR_ERR(bdev));
1417 retcode = ERR_OPEN_DISK;
1418 goto fail;
1419 }
1420 nbc->backing_bdev = bdev;
1421
1422 /*
1423 * meta_dev_idx >= 0: external fixed size, possibly multiple
1424 * drbd sharing one meta device. TODO in that case, paranoia
1425 * check that [md_bdev, meta_dev_idx] is not yet used by some
1426 * other drbd minor! (if you use drbd.conf + drbdadm, that
1427 * should check it for you already; but if you don't, or
1428 * someone fooled it, we need to double check here)
1429 */
1430 bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
1431 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1432 (new_disk_conf->meta_dev_idx < 0) ?
1433 (void *)mdev : (void *)drbd_m_holder);
1434 if (IS_ERR(bdev)) {
1435 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
1436 PTR_ERR(bdev));
1437 retcode = ERR_OPEN_MD_DISK;
1438 goto fail;
1439 }
1440 nbc->md_bdev = bdev;
1441
1442 if ((nbc->backing_bdev == nbc->md_bdev) !=
1443 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1444 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1445 retcode = ERR_MD_IDX_INVALID;
1446 goto fail;
1447 }
1448
1449 resync_lru = lc_create("resync", drbd_bm_ext_cache,
1450 1, 61, sizeof(struct bm_extent),
1451 offsetof(struct bm_extent, lce));
1452 if (!resync_lru) {
1453 retcode = ERR_NOMEM;
1454 goto fail;
1455 }
1456
1457 /* Read our meta data super block early.
1458 * This also sets other on-disk offsets. */
1459 retcode = drbd_md_read(mdev, nbc);
1460 if (retcode != NO_ERROR)
1461 goto fail;
1462
1463 if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1464 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1465 if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
1466 new_disk_conf->al_extents = drbd_al_extents_max(nbc);
1467
1468 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1469 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1470 (unsigned long long) drbd_get_max_capacity(nbc),
1471 (unsigned long long) new_disk_conf->disk_size);
1472 retcode = ERR_DISK_TOO_SMALL;
1473 goto fail;
1474 }
1475
1476 if (new_disk_conf->meta_dev_idx < 0) {
1477 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1478 /* at least one MB, otherwise it does not make sense */
1479 min_md_device_sectors = (2<<10);
1480 } else {
1481 max_possible_sectors = DRBD_MAX_SECTORS;
1482 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1483 }
1484
1485 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1486 retcode = ERR_MD_DISK_TOO_SMALL;
1487 dev_warn(DEV, "refusing attach: md-device too small, "
1488 "at least %llu sectors needed for this meta-disk type\n",
1489 (unsigned long long) min_md_device_sectors);
1490 goto fail;
1491 }
1492
1493 /* Make sure the new disk is big enough
1494 * (we may currently be R_PRIMARY with no local disk...) */
1495 if (drbd_get_max_capacity(nbc) <
1496 drbd_get_capacity(mdev->this_bdev)) {
1497 retcode = ERR_DISK_TOO_SMALL;
1498 goto fail;
1499 }
1500
1501 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1502
1503 if (nbc->known_size > max_possible_sectors) {
1504 dev_warn(DEV, "==> truncating very big lower level device "
1505 "to currently maximum possible %llu sectors <==\n",
1506 (unsigned long long) max_possible_sectors);
1507 if (new_disk_conf->meta_dev_idx >= 0)
1508 dev_warn(DEV, "==>> using internal or flexible "
1509 "meta data may help <<==\n");
1510 }
1511
1512 drbd_suspend_io(mdev);
1513 /* also wait for the last barrier ack. */
1514 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1515 * We need a way to either ignore barrier acks for barriers sent before a device
1516 * was attached, or a way to wait for all pending barrier acks to come in.
1517 * As barriers are counted per resource,
1518 * we'd need to suspend io on all devices of a resource.
1519 */
1520 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1521 /* and for any other previously queued work */
1522 drbd_flush_workqueue(mdev);
1523
1524 rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1525 retcode = rv; /* FIXME: Type mismatch. */
1526 drbd_resume_io(mdev);
1527 if (rv < SS_SUCCESS)
1528 goto fail;
1529
1530 if (!get_ldev_if_state(mdev, D_ATTACHING))
1531 goto force_diskless;
1532
1533 if (!mdev->bitmap) {
1534 if (drbd_bm_init(mdev)) {
1535 retcode = ERR_NOMEM;
1536 goto force_diskless_dec;
1537 }
1538 }
1539
1540 if (mdev->state.conn < C_CONNECTED &&
1541 mdev->state.role == R_PRIMARY &&
1542 (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1543 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1544 (unsigned long long)mdev->ed_uuid);
1545 retcode = ERR_DATA_NOT_CURRENT;
1546 goto force_diskless_dec;
1547 }
1548
1549 /* Since we are diskless, fix the activity log first... */
1550 if (drbd_check_al_size(mdev, new_disk_conf)) {
1551 retcode = ERR_NOMEM;
1552 goto force_diskless_dec;
1553 }
1554
1555 /* Prevent shrinking of consistent devices ! */
1556 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1557 drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1558 dev_warn(DEV, "refusing to truncate a consistent device\n");
1559 retcode = ERR_DISK_TOO_SMALL;
1560 goto force_diskless_dec;
1561 }
1562
1563 /* Reset the "barriers don't work" bits here, then force meta data to
1564 * be written, to ensure we determine if barriers are supported. */
1565 if (new_disk_conf->md_flushes)
1566 clear_bit(MD_NO_FUA, &mdev->flags);
1567 else
1568 set_bit(MD_NO_FUA, &mdev->flags);
1569
1570 /* Point of no return reached.
1571 * Devices and memory are no longer released by error cleanup below.
1572 * now mdev takes over responsibility, and the state engine should
1573 * clean it up somewhere. */
1574 D_ASSERT(mdev->ldev == NULL);
1575 mdev->ldev = nbc;
1576 mdev->resync = resync_lru;
1577 mdev->rs_plan_s = new_plan;
1578 nbc = NULL;
1579 resync_lru = NULL;
1580 new_disk_conf = NULL;
1581 new_plan = NULL;
1582
1583 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
1584
1585 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1586 set_bit(CRASHED_PRIMARY, &mdev->flags);
1587 else
1588 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1589
1590 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1591 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
1592 set_bit(CRASHED_PRIMARY, &mdev->flags);
1593
1594 mdev->send_cnt = 0;
1595 mdev->recv_cnt = 0;
1596 mdev->read_cnt = 0;
1597 mdev->writ_cnt = 0;
1598
1599 drbd_reconsider_max_bio_size(mdev);
1600
1601 /* If I am currently not R_PRIMARY,
1602 * but meta data primary indicator is set,
1603 * I just now recover from a hard crash,
1604 * and have been R_PRIMARY before that crash.
1605 *
1606 * Now, if I had no connection before that crash
1607 * (have been degraded R_PRIMARY), chances are that
1608 * I won't find my peer now either.
1609 *
1610 * In that case, and _only_ in that case,
1611 * we use the degr-wfc-timeout instead of the default,
1612 * so we can automatically recover from a crash of a
1613 * degraded but active "cluster" after a certain timeout.
1614 */
1615 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1616 if (mdev->state.role != R_PRIMARY &&
1617 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1618 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1619 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1620
1621 dd = drbd_determine_dev_size(mdev, 0);
1622 if (dd == dev_size_error) {
1623 retcode = ERR_NOMEM_BITMAP;
1624 goto force_diskless_dec;
1625 } else if (dd == grew)
1626 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1627
1628 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1629 (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1630 drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
1631 dev_info(DEV, "Assuming that all blocks are out of sync "
1632 "(aka FullSync)\n");
1633 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1634 "set_n_write from attaching", BM_LOCKED_MASK)) {
1635 retcode = ERR_IO_MD_DISK;
1636 goto force_diskless_dec;
1637 }
1638 } else {
1639 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1640 "read from attaching", BM_LOCKED_MASK)) {
1641 retcode = ERR_IO_MD_DISK;
1642 goto force_diskless_dec;
1643 }
1644 }
1645
1646 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1647 drbd_suspend_al(mdev); /* IO is still suspended here... */
1648
1649 spin_lock_irq(&mdev->tconn->req_lock);
1650 os = drbd_read_state(mdev);
1651 ns = os;
1652 /* If MDF_CONSISTENT is not set go into inconsistent state,
1653 otherwise investigate MDF_WasUpToDate...
1654 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1655 otherwise into D_CONSISTENT state.
1656 */
1657 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1658 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1659 ns.disk = D_CONSISTENT;
1660 else
1661 ns.disk = D_OUTDATED;
1662 } else {
1663 ns.disk = D_INCONSISTENT;
1664 }
1665
1666 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1667 ns.pdsk = D_OUTDATED;
1668
1669 rcu_read_lock();
1670 if (ns.disk == D_CONSISTENT &&
1671 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
1672 ns.disk = D_UP_TO_DATE;
1673
1674 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1675 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1676 this point, because drbd_request_state() modifies these
1677 flags. */
1678
1679 if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
1680 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1681 else
1682 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1683
1684 rcu_read_unlock();
1685
1686 /* In case we are C_CONNECTED, postpone any decision on the new disk
1687 state until after the negotiation phase. */
1688 if (mdev->state.conn == C_CONNECTED) {
1689 mdev->new_state_tmp.i = ns.i;
1690 ns.i = os.i;
1691 ns.disk = D_NEGOTIATING;
1692
1693 /* We expect to receive up-to-date UUIDs soon.
1694 To avoid a race in receive_state, free p_uuid while
1695 holding req_lock. I.e. atomic with the state change */
1696 kfree(mdev->p_uuid);
1697 mdev->p_uuid = NULL;
1698 }
1699
1700 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1701 spin_unlock_irq(&mdev->tconn->req_lock);
1702
1703 if (rv < SS_SUCCESS)
1704 goto force_diskless_dec;
1705
1706 mod_timer(&mdev->request_timer, jiffies + HZ);
1707
1708 if (mdev->state.role == R_PRIMARY)
1709 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1710 else
1711 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1712
1713 drbd_md_mark_dirty(mdev);
1714 drbd_md_sync(mdev);
1715
1716 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1717 put_ldev(mdev);
1718 conn_reconfig_done(mdev->tconn);
1719 drbd_adm_finish(info, retcode);
1720 return 0;
1721
1722 force_diskless_dec:
1723 put_ldev(mdev);
1724 force_diskless:
1725 drbd_force_state(mdev, NS(disk, D_DISKLESS));
1726 drbd_md_sync(mdev);
1727 fail:
1728 conn_reconfig_done(mdev->tconn);
1729 if (nbc) {
1730 if (nbc->backing_bdev)
1731 blkdev_put(nbc->backing_bdev,
1732 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1733 if (nbc->md_bdev)
1734 blkdev_put(nbc->md_bdev,
1735 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1736 kfree(nbc);
1737 }
1738 kfree(new_disk_conf);
1739 lc_destroy(resync_lru);
1740 kfree(new_plan);
1741
1742 finish:
1743 drbd_adm_finish(info, retcode);
1744 return 0;
1745}
1746
1747static int adm_detach(struct drbd_conf *mdev, int force)
1748{
1749 enum drbd_state_rv retcode;
1750 int ret;
1751
1752 if (force) {
1753 set_bit(FORCE_DETACH, &mdev->flags);
1754 drbd_force_state(mdev, NS(disk, D_FAILED));
1755 retcode = SS_SUCCESS;
1756 goto out;
1757 }
1758
1759 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1760 drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
1761 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1762 drbd_md_put_buffer(mdev);
1763 /* D_FAILED will transition to DISKLESS. */
1764 ret = wait_event_interruptible(mdev->misc_wait,
1765 mdev->state.disk != D_FAILED);
1766 drbd_resume_io(mdev);
1767 if ((int)retcode == (int)SS_IS_DISKLESS)
1768 retcode = SS_NOTHING_TO_DO;
1769 if (ret)
1770 retcode = ERR_INTR;
1771out:
1772 return retcode;
1773}
1774
1775/* Detaching the disk is a process in multiple stages. First we need to lock
1776 * out application IO, in-flight IO, and IO stuck in drbd_al_begin_io.
1777 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1778 * internal references as well.
1779 * Only then have we finally detached. */
1780int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1781{
1782 enum drbd_ret_code retcode;
1783 struct detach_parms parms = { };
1784 int err;
1785
1786 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1787 if (!adm_ctx.reply_skb)
1788 return retcode;
1789 if (retcode != NO_ERROR)
1790 goto out;
1791
1792 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1793 err = detach_parms_from_attrs(&parms, info);
1794 if (err) {
1795 retcode = ERR_MANDATORY_TAG;
1796 drbd_msg_put_info(from_attrs_err_to_txt(err));
1797 goto out;
1798 }
1799 }
1800
1801 retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
1802out:
1803 drbd_adm_finish(info, retcode);
1804 return 0;
1805}
1806
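/* Returns true if any volume of this connection is currently resync source
 * or target, including the paused resync states. */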
1807static bool conn_resync_running(struct drbd_tconn *tconn)
1808{
1809 struct drbd_conf *mdev;
1810 bool rv = false;
1811 int vnr;
1812
1813 rcu_read_lock();
1814 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1815 if (mdev->state.conn == C_SYNC_SOURCE ||
1816 mdev->state.conn == C_SYNC_TARGET ||
1817 mdev->state.conn == C_PAUSED_SYNC_S ||
1818 mdev->state.conn == C_PAUSED_SYNC_T) {
1819 rv = true;
1820 break;
1821 }
1822 }
1823 rcu_read_unlock();
1824
1825 return rv;
1826}
1827
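/* Returns true if any volume of this connection is running online verify. */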
1828static bool conn_ov_running(struct drbd_tconn *tconn)
1829{
1830 struct drbd_conf *mdev;
1831 bool rv = false;
1832 int vnr;
1833
1834 rcu_read_lock();
1835 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1836 if (mdev->state.conn == C_VERIFY_S ||
1837 mdev->state.conn == C_VERIFY_T) {
1838 rv = true;
1839 break;
1840 }
1841 }
1842 rcu_read_unlock();
1843
1844 return rv;
1845}
1846
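/* Sanity-check new net options against the current connection state and the
 * configured volumes; called under rcu_read_lock() (see check_net_options())
 * so the per-volume disk_conf can be dereferenced. */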
1847static enum drbd_ret_code
1848_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
1849{
1850 struct drbd_conf *mdev;
1851 int i;
1852
1853 if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1854 if (new_conf->wire_protocol != old_conf->wire_protocol)
1855 return ERR_NEED_APV_100;
1856
1857 if (new_conf->two_primaries != old_conf->two_primaries)
1858 return ERR_NEED_APV_100;
1859
1860 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1861 return ERR_NEED_APV_100;
1862 }
1863
1864 if (!new_conf->two_primaries &&
1865 conn_highest_role(tconn) == R_PRIMARY &&
1866 conn_highest_peer(tconn) == R_PRIMARY)
1867 return ERR_NEED_ALLOW_TWO_PRI;
1868
1869 if (new_conf->two_primaries &&
1870 (new_conf->wire_protocol != DRBD_PROT_C))
1871 return ERR_NOT_PROTO_C;
1872
1873 idr_for_each_entry(&tconn->volumes, mdev, i) {
1874 if (get_ldev(mdev)) {
1875 enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
1876 put_ldev(mdev);
1877 if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
1878 return ERR_STONITH_AND_PROT_A;
1879 }
1880 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
1881 return ERR_DISCARD_IMPOSSIBLE;
1882 }
1883
1884 if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1885 return ERR_CONG_NOT_PROTO_A;
1886
1887 return NO_ERROR;
1888}
1889
1890static enum drbd_ret_code
1891check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1892{
1893 static enum drbd_ret_code rv;
1894 struct drbd_conf *mdev;
1895 int i;
1896
1897 rcu_read_lock();
1898 rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1899 rcu_read_unlock();
1900
1901 /* tconn->volumes protected by genl_lock() here */
1902 idr_for_each_entry(&tconn->volumes, mdev, i) {
1903 if (!mdev->bitmap) {
1904			if (drbd_bm_init(mdev))
1905 return ERR_NOMEM;
1906 }
1907 }
1908
1909 return rv;
1910}
1911
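/* Freshly allocated hash transforms, grouped so they can be swapped into the
 * connection on success or freed together on failure. */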
1912struct crypto {
1913 struct crypto_hash *verify_tfm;
1914 struct crypto_hash *csums_tfm;
1915 struct crypto_hash *cram_hmac_tfm;
1916 struct crypto_hash *integrity_tfm;
1917};
1918
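/* Allocate a hash transform by name.  An empty name means "not configured"
 * and is not an error; on allocation failure return the given error code. */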
1919static int
1920alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
1921{
1922 if (!tfm_name[0])
1923 return NO_ERROR;
1924
1925 *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1926 if (IS_ERR(*tfm)) {
1927 *tfm = NULL;
1928 return err_alg;
1929 }
1930
1931 return NO_ERROR;
1932}
1933
1934static enum drbd_ret_code
1935alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1936{
1937 char hmac_name[CRYPTO_MAX_ALG_NAME];
1938 enum drbd_ret_code rv;
1939
1940 rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1941 ERR_CSUMS_ALG);
1942 if (rv != NO_ERROR)
1943 return rv;
1944 rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1945 ERR_VERIFY_ALG);
1946 if (rv != NO_ERROR)
1947 return rv;
1948 rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1949 ERR_INTEGRITY_ALG);
1950 if (rv != NO_ERROR)
1951 return rv;
1952 if (new_conf->cram_hmac_alg[0] != 0) {
1953 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1954 new_conf->cram_hmac_alg);
1955
1956 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1957 ERR_AUTH_ALG);
1958 }
1959
1960 return rv;
1961}
1962
1963static void free_crypto(struct crypto *crypto)
1964{
1965 crypto_free_hash(crypto->cram_hmac_tfm);
1966 crypto_free_hash(crypto->integrity_tfm);
1967 crypto_free_hash(crypto->csums_tfm);
1968 crypto_free_hash(crypto->verify_tfm);
1969}
1970
1971int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1972{
1973 enum drbd_ret_code retcode;
1974 struct drbd_tconn *tconn;
1975 struct net_conf *old_conf, *new_conf = NULL;
1976 int err;
1977 int ovr; /* online verify running */
1978 int rsr; /* re-sync running */
1979 struct crypto crypto = { };
1980
1981 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
1982 if (!adm_ctx.reply_skb)
1983 return retcode;
1984 if (retcode != NO_ERROR)
1985 goto out;
1986
1987 tconn = adm_ctx.tconn;
1988
1989 new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1990 if (!new_conf) {
1991 retcode = ERR_NOMEM;
1992 goto out;
1993 }
1994
1995 conn_reconfig_start(tconn);
1996
1997 mutex_lock(&tconn->data.mutex);
1998 mutex_lock(&tconn->conf_update);
1999 old_conf = tconn->net_conf;
2000
2001 if (!old_conf) {
2002 drbd_msg_put_info("net conf missing, try connect");
2003 retcode = ERR_INVALID_REQUEST;
2004 goto fail;
2005 }
2006
2007 *new_conf = *old_conf;
2008 if (should_set_defaults(info))
2009 set_net_conf_defaults(new_conf);
2010
2011 err = net_conf_from_attrs_for_change(new_conf, info);
2012 if (err && err != -ENOMSG) {
2013 retcode = ERR_MANDATORY_TAG;
2014 drbd_msg_put_info(from_attrs_err_to_txt(err));
2015 goto fail;
2016 }
2017
2018 retcode = check_net_options(tconn, new_conf);
2019 if (retcode != NO_ERROR)
2020 goto fail;
2021
2022 /* re-sync running */
2023 rsr = conn_resync_running(tconn);
2024 if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
2025 retcode = ERR_CSUMS_RESYNC_RUNNING;
2026 goto fail;
2027 }
2028
2029 /* online verify running */
2030 ovr = conn_ov_running(tconn);
2031 if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
2032 retcode = ERR_VERIFY_RUNNING;
2033 goto fail;
2034 }
2035
2036 retcode = alloc_crypto(&crypto, new_conf);
2037 if (retcode != NO_ERROR)
2038 goto fail;
2039
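	/* Publish the new net_conf.  RCU readers see either the old or the new
	 * configuration; the old one is freed only after the synchronize_rcu()
	 * below. */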
2040 rcu_assign_pointer(tconn->net_conf, new_conf);
2041
2042 if (!rsr) {
2043 crypto_free_hash(tconn->csums_tfm);
2044 tconn->csums_tfm = crypto.csums_tfm;
2045 crypto.csums_tfm = NULL;
2046 }
2047 if (!ovr) {
2048 crypto_free_hash(tconn->verify_tfm);
2049 tconn->verify_tfm = crypto.verify_tfm;
2050 crypto.verify_tfm = NULL;
2051 }
2052
2053 crypto_free_hash(tconn->integrity_tfm);
2054 tconn->integrity_tfm = crypto.integrity_tfm;
2055 if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
2056 /* Do this without trying to take tconn->data.mutex again. */
2057 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
2058
2059 crypto_free_hash(tconn->cram_hmac_tfm);
2060 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2061
2062 mutex_unlock(&tconn->conf_update);
2063 mutex_unlock(&tconn->data.mutex);
2064 synchronize_rcu();
2065 kfree(old_conf);
2066
2067 if (tconn->cstate >= C_WF_REPORT_PARAMS)
2068 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2069
2070 goto done;
2071
2072 fail:
2073 mutex_unlock(&tconn->conf_update);
2074 mutex_unlock(&tconn->data.mutex);
2075 free_crypto(&crypto);
2076 kfree(new_conf);
2077 done:
2078 conn_reconfig_done(tconn);
2079 out:
2080 drbd_adm_finish(info, retcode);
2081 return 0;
2082}
2083
2084int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2085{
2086 struct drbd_conf *mdev;
2087 struct net_conf *old_conf, *new_conf = NULL;
2088 struct crypto crypto = { };
2089 struct drbd_tconn *tconn;
2090 enum drbd_ret_code retcode;
2091 int i;
2092 int err;
2093
2094 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
2095
2096 if (!adm_ctx.reply_skb)
2097 return retcode;
2098 if (retcode != NO_ERROR)
2099 goto out;
2100 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2101 drbd_msg_put_info("connection endpoint(s) missing");
2102 retcode = ERR_INVALID_REQUEST;
2103 goto out;
2104 }
2105
2106 /* No need for _rcu here. All reconfiguration is
2107 * strictly serialized on genl_lock(). We are protected against
2108 * concurrent reconfiguration/addition/deletion */
2109 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2110 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2111 !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2112 retcode = ERR_LOCAL_ADDR;
2113 goto out;
2114 }
2115
2116 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2117 !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2118 retcode = ERR_PEER_ADDR;
2119 goto out;
2120 }
2121 }
2122
2123 tconn = adm_ctx.tconn;
2124 conn_reconfig_start(tconn);
2125
2126 if (tconn->cstate > C_STANDALONE) {
2127 retcode = ERR_NET_CONFIGURED;
2128 goto fail;
2129 }
2130
2131 /* allocation not in the IO path, drbdsetup / netlink process context */
2132 new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
2133 if (!new_conf) {
2134 retcode = ERR_NOMEM;
2135 goto fail;
2136 }
2137
2138 set_net_conf_defaults(new_conf);
2139
2140 err = net_conf_from_attrs(new_conf, info);
2141 if (err && err != -ENOMSG) {
2142 retcode = ERR_MANDATORY_TAG;
2143 drbd_msg_put_info(from_attrs_err_to_txt(err));
2144 goto fail;
2145 }
2146
2147 retcode = check_net_options(tconn, new_conf);
2148 if (retcode != NO_ERROR)
2149 goto fail;
2150
2151 retcode = alloc_crypto(&crypto, new_conf);
2152 if (retcode != NO_ERROR)
2153 goto fail;
2154
2155 ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2156
2157 conn_flush_workqueue(tconn);
2158
2159 mutex_lock(&tconn->conf_update);
2160 old_conf = tconn->net_conf;
2161 if (old_conf) {
2162 retcode = ERR_NET_CONFIGURED;
2163 mutex_unlock(&tconn->conf_update);
2164 goto fail;
2165 }
2166 rcu_assign_pointer(tconn->net_conf, new_conf);
2167
2168 conn_free_crypto(tconn);
2169 tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2170 tconn->integrity_tfm = crypto.integrity_tfm;
2171 tconn->csums_tfm = crypto.csums_tfm;
2172 tconn->verify_tfm = crypto.verify_tfm;
2173
2174 tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2175 memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2176 tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2177 memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2178
2179 mutex_unlock(&tconn->conf_update);
2180
2181 rcu_read_lock();
2182 idr_for_each_entry(&tconn->volumes, mdev, i) {
2183 mdev->send_cnt = 0;
2184 mdev->recv_cnt = 0;
2185 }
2186 rcu_read_unlock();
2187
2188 retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2189
2190 conn_reconfig_done(tconn);
2191 drbd_adm_finish(info, retcode);
2192 return 0;
2193
2194fail:
2195 free_crypto(&crypto);
2196 kfree(new_conf);
2197
2198 conn_reconfig_done(tconn);
2199out:
2200 drbd_adm_finish(info, retcode);
2201 return 0;
2202}
2203
2204static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2205{
2206 enum drbd_state_rv rv;
2207
2208 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2209 force ? CS_HARD : 0);
2210
2211 switch (rv) {
2212 case SS_NOTHING_TO_DO:
2213 break;
2214 case SS_ALREADY_STANDALONE:
2215 return SS_SUCCESS;
2216 case SS_PRIMARY_NOP:
2217 /* Our state checking code wants to see the peer outdated. */
2218 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2219
2220 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2221 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2222
2223 break;
2224 case SS_CW_FAILED_BY_PEER:
2225 /* The peer probably wants to see us outdated. */
2226 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2227 disk, D_OUTDATED), 0);
2228 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2229 rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2230 CS_HARD);
2231 }
2232 break;
2233 default:;
2234 /* no special handling necessary */
2235 }
2236
2237 if (rv >= SS_SUCCESS) {
2238 enum drbd_state_rv rv2;
2239 /* No one else can reconfigure the network while I am here.
2240 * The state handling only uses drbd_thread_stop_nowait(),
2241 * we want to really wait here until the receiver is no more.
2242 */
2243 drbd_thread_stop(&adm_ctx.tconn->receiver);
2244
2245 /* Race breaker. This additional state change request may be
2246		 * necessary if this was a forced disconnect during a receiver
2247 * restart. We may have "killed" the receiver thread just
2248 * after drbdd_init() returned. Typically, we should be
2249		 * C_STANDALONE by now, and this becomes a no-op.
2250 */
2251 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2252 CS_VERBOSE | CS_HARD);
2253 if (rv2 < SS_SUCCESS)
2254 conn_err(tconn,
2255 "unexpected rv2=%d in conn_try_disconnect()\n",
2256 rv2);
2257 }
2258 return rv;
2259}
2260
2261int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2262{
2263 struct disconnect_parms parms;
2264 struct drbd_tconn *tconn;
2265 enum drbd_state_rv rv;
2266 enum drbd_ret_code retcode;
2267 int err;
2268
2269 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
2270 if (!adm_ctx.reply_skb)
2271 return retcode;
2272 if (retcode != NO_ERROR)
2273 goto fail;
2274
2275 tconn = adm_ctx.tconn;
2276 memset(&parms, 0, sizeof(parms));
2277 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2278 err = disconnect_parms_from_attrs(&parms, info);
2279 if (err) {
2280 retcode = ERR_MANDATORY_TAG;
2281 drbd_msg_put_info(from_attrs_err_to_txt(err));
2282 goto fail;
2283 }
2284 }
2285
2286 rv = conn_try_disconnect(tconn, parms.force_disconnect);
2287 if (rv < SS_SUCCESS)
2288 retcode = rv; /* FIXME: Type mismatch. */
2289 else
2290 retcode = NO_ERROR;
2291 fail:
2292 drbd_adm_finish(info, retcode);
2293 return 0;
2294}
2295
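/* After an online grow while connected, pick the sync source for the new
 * area: the Primary becomes source if the roles differ; otherwise this node
 * becomes source iff RESOLVE_CONFLICTS is set on the connection. */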
2296void resync_after_online_grow(struct drbd_conf *mdev)
2297{
2298 int iass; /* I am sync source */
2299
2300 dev_info(DEV, "Resync of new storage after online grow\n");
2301 if (mdev->state.role != mdev->state.peer)
2302 iass = (mdev->state.role == R_PRIMARY);
2303 else
2304 iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
2305
2306 if (iass)
2307 drbd_start_resync(mdev, C_SYNC_SOURCE);
2308 else
2309 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2310}
2311
2312int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2313{
2314 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2315 struct resize_parms rs;
2316 struct drbd_conf *mdev;
2317 enum drbd_ret_code retcode;
2318 enum determine_dev_size dd;
2319 enum dds_flags ddsf;
2320 sector_t u_size;
2321 int err;
2322
2323 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2324 if (!adm_ctx.reply_skb)
2325 return retcode;
2326 if (retcode != NO_ERROR)
2327 goto fail;
2328
2329 memset(&rs, 0, sizeof(struct resize_parms));
2330 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2331 err = resize_parms_from_attrs(&rs, info);
2332 if (err) {
2333 retcode = ERR_MANDATORY_TAG;
2334 drbd_msg_put_info(from_attrs_err_to_txt(err));
2335 goto fail;
2336 }
2337 }
2338
2339 mdev = adm_ctx.mdev;
2340 if (mdev->state.conn > C_CONNECTED) {
2341 retcode = ERR_RESIZE_RESYNC;
2342 goto fail;
2343 }
2344
2345 if (mdev->state.role == R_SECONDARY &&
2346 mdev->state.peer == R_SECONDARY) {
2347 retcode = ERR_NO_PRIMARY;
2348 goto fail;
2349 }
2350
2351 if (!get_ldev(mdev)) {
2352 retcode = ERR_NO_DISK;
2353 goto fail;
2354 }
2355
2356 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2357 retcode = ERR_NEED_APV_93;
2358 goto fail_ldev;
2359 }
2360
2361 rcu_read_lock();
2362 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2363 rcu_read_unlock();
2364 if (u_size != (sector_t)rs.resize_size) {
2365 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2366 if (!new_disk_conf) {
2367 retcode = ERR_NOMEM;
2368 goto fail_ldev;
2369 }
2370 }
2371
2372 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2373 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2374
2375 if (new_disk_conf) {
2376 mutex_lock(&mdev->tconn->conf_update);
2377 old_disk_conf = mdev->ldev->disk_conf;
2378 *new_disk_conf = *old_disk_conf;
2379 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2380 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2381 mutex_unlock(&mdev->tconn->conf_update);
2382 synchronize_rcu();
2383 kfree(old_disk_conf);
2384 }
2385
2386 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2387 dd = drbd_determine_dev_size(mdev, ddsf);
2388 drbd_md_sync(mdev);
2389 put_ldev(mdev);
2390 if (dd == dev_size_error) {
2391 retcode = ERR_NOMEM_BITMAP;
2392 goto fail;
2393 }
2394
2395 if (mdev->state.conn == C_CONNECTED) {
2396 if (dd == grew)
2397 set_bit(RESIZE_PENDING, &mdev->flags);
2398
2399 drbd_send_uuids(mdev);
2400 drbd_send_sizes(mdev, 1, ddsf);
2401 }
2402
2403 fail:
2404 drbd_adm_finish(info, retcode);
2405 return 0;
2406
2407 fail_ldev:
2408 put_ldev(mdev);
2409 goto fail;
2410}
2411
2412int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2413{
2414 enum drbd_ret_code retcode;
2415 struct drbd_tconn *tconn;
2416 struct res_opts res_opts;
2417 int err;
2418
2419 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
2420 if (!adm_ctx.reply_skb)
2421 return retcode;
2422 if (retcode != NO_ERROR)
2423 goto fail;
2424 tconn = adm_ctx.tconn;
2425
2426 res_opts = tconn->res_opts;
2427 if (should_set_defaults(info))
2428 set_res_opts_defaults(&res_opts);
2429
2430 err = res_opts_from_attrs(&res_opts, info);
2431 if (err && err != -ENOMSG) {
2432 retcode = ERR_MANDATORY_TAG;
2433 drbd_msg_put_info(from_attrs_err_to_txt(err));
2434 goto fail;
2435 }
2436
2437 err = set_resource_options(tconn, &res_opts);
2438 if (err) {
2439 retcode = ERR_INVALID_REQUEST;
2440 if (err == -ENOMEM)
2441 retcode = ERR_NOMEM;
2442 }
2443
2444fail:
2445 drbd_adm_finish(info, retcode);
2446 return 0;
2447}
2448
2449int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2450{
2451 struct drbd_conf *mdev;
2452 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2453
2454 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2455 if (!adm_ctx.reply_skb)
2456 return retcode;
2457 if (retcode != NO_ERROR)
2458 goto out;
2459
2460 mdev = adm_ctx.mdev;
2461
2462 /* If there is still bitmap IO pending, probably because of a previous
2463 * resync just being finished, wait for it before requesting a new resync.
2464	 * Also wait for its after_state_ch(). */
2465 drbd_suspend_io(mdev);
2466 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2467 drbd_flush_workqueue(mdev);
2468
2469 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
2470 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
2471 * try to start a resync handshake as sync target for full sync.
2472 */
2473 if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
2474 retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
2475 if (retcode >= SS_SUCCESS) {
2476 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
2477 "set_n_write from invalidate", BM_LOCKED_MASK))
2478 retcode = ERR_IO_MD_DISK;
2479 }
2480 } else
2481 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2482 drbd_resume_io(mdev);
2483
2484out:
2485 drbd_adm_finish(info, retcode);
2486 return 0;
2487}
2488
2489static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2490 union drbd_state mask, union drbd_state val)
2491{
2492 enum drbd_ret_code retcode;
2493
2494 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2495 if (!adm_ctx.reply_skb)
2496 return retcode;
2497 if (retcode != NO_ERROR)
2498 goto out;
2499
2500 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2501out:
2502 drbd_adm_finish(info, retcode);
2503 return 0;
2504}
2505
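/* Bitmap IO callback used by invalidate_peer: set all bits in the bitmap and
 * suspend activity log updates. */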
2506static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2507{
2508 int rv;
2509
2510 rv = drbd_bmio_set_n_write(mdev);
2511 drbd_suspend_al(mdev);
2512 return rv;
2513}
2514
2515int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2516{
2517 int retcode; /* drbd_ret_code, drbd_state_rv */
2518 struct drbd_conf *mdev;
2519
2520 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2521 if (!adm_ctx.reply_skb)
2522 return retcode;
2523 if (retcode != NO_ERROR)
2524 goto out;
2525
2526 mdev = adm_ctx.mdev;
2527
2528 /* If there is still bitmap IO pending, probably because of a previous
2529 * resync just being finished, wait for it before requesting a new resync.
2530	 * Also wait for its after_state_ch(). */
2531 drbd_suspend_io(mdev);
2532 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2533 drbd_flush_workqueue(mdev);
2534
2535 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
2536 * in the bitmap. Otherwise, try to start a resync handshake
2537 * as sync source for full sync.
2538 */
2539 if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
2540		/* The peer will get a resync upon connect anyway. Just make that
2541 into a full resync. */
2542 retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
2543 if (retcode >= SS_SUCCESS) {
2544 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
2545 "set_n_write from invalidate_peer",
2546 BM_LOCKED_SET_ALLOWED))
2547 retcode = ERR_IO_MD_DISK;
2548 }
2549 } else
2550 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
2551 drbd_resume_io(mdev);
2552
2553out:
2554 drbd_adm_finish(info, retcode);
2555 return 0;
2556}
2557
2558int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2559{
2560 enum drbd_ret_code retcode;
2561
2562 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2563 if (!adm_ctx.reply_skb)
2564 return retcode;
2565 if (retcode != NO_ERROR)
2566 goto out;
2567
2568 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2569 retcode = ERR_PAUSE_IS_SET;
2570out:
2571 drbd_adm_finish(info, retcode);
2572 return 0;
2573}
2574
2575int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2576{
2577 union drbd_dev_state s;
2578 enum drbd_ret_code retcode;
2579
2580 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2581 if (!adm_ctx.reply_skb)
2582 return retcode;
2583 if (retcode != NO_ERROR)
2584 goto out;
2585
2586 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2587 s = adm_ctx.mdev->state;
2588 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2589 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2590 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2591 } else {
2592 retcode = ERR_PAUSE_IS_CLEAR;
2593 }
2594 }
2595
2596out:
2597 drbd_adm_finish(info, retcode);
2598 return 0;
2599}
2600
2601int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2602{
2603 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2604}
2605
2606int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2607{
2608 struct drbd_conf *mdev;
2609 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2610
2611 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2612 if (!adm_ctx.reply_skb)
2613 return retcode;
2614 if (retcode != NO_ERROR)
2615 goto out;
2616
2617 mdev = adm_ctx.mdev;
2618 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2619 drbd_uuid_new_current(mdev);
2620 clear_bit(NEW_CUR_UUID, &mdev->flags);
2621 }
2622 drbd_suspend_io(mdev);
2623 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2624 if (retcode == SS_SUCCESS) {
2625 if (mdev->state.conn < C_CONNECTED)
2626 tl_clear(mdev->tconn);
2627 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2628 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2629 }
2630 drbd_resume_io(mdev);
2631
2632out:
2633 drbd_adm_finish(info, retcode);
2634 return 0;
2635}
2636
2637int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2638{
2639 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2640}
2641
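/* Nest resource name, volume number (where specified) and both endpoint
 * addresses (where known) into a DRBD_NLA_CFG_CONTEXT attribute of the skb. */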
2642int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
2643{
2644 struct nlattr *nla;
2645 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2646 if (!nla)
2647 goto nla_put_failure;
2648 if (vnr != VOLUME_UNSPECIFIED &&
2649 nla_put_u32(skb, T_ctx_volume, vnr))
2650 goto nla_put_failure;
2651 if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
2652 goto nla_put_failure;
2653 if (tconn->my_addr_len &&
2654 nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
2655 goto nla_put_failure;
2656 if (tconn->peer_addr_len &&
2657 nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
2658 goto nla_put_failure;
2659 nla_nest_end(skb, nla);
2660 return 0;
2661
2662nla_put_failure:
2663 if (nla)
2664 nla_nest_cancel(skb, nla);
2665 return -EMSGSIZE;
2666}
2667
2668int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2669 const struct sib_info *sib)
2670{
2671 struct state_info *si = NULL; /* for sizeof(si->member); */
2672 struct nlattr *nla;
2673 int got_ldev;
2674 int err = 0;
2675 int exclude_sensitive;
2676
2677 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2678 * to. So we better exclude_sensitive information.
2679 *
2680 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2681 * in the context of the requesting user process. Exclude sensitive
2682 * information, unless current has superuser.
2683 *
2684 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2685 * relies on the current implementation of netlink_dump(), which
2686 * executes the dump callback successively from netlink_recvmsg(),
2687 * always in the context of the receiving process */
2688 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2689
2690 got_ldev = get_ldev(mdev);
2691
2692	/* We still need to add connection name and volume number information.
2693 * Minor number is in drbd_genlmsghdr. */
2694 if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
2695 goto nla_put_failure;
2696
2697 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2698 goto nla_put_failure;
2699
2700 rcu_read_lock();
2701 if (got_ldev) {
2702 struct disk_conf *disk_conf;
2703
2704 disk_conf = rcu_dereference(mdev->ldev->disk_conf);
2705 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
2706 }
2707 if (!err) {
2708 struct net_conf *nc;
2709
2710 nc = rcu_dereference(mdev->tconn->net_conf);
2711 if (nc)
2712 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2713 }
2714 rcu_read_unlock();
2715 if (err)
2716 goto nla_put_failure;
2717
2718 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2719 if (!nla)
2720 goto nla_put_failure;
2721 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2722 nla_put_u32(skb, T_current_state, mdev->state.i) ||
2723 nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
2724 nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
2725 nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
2726 nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
2727 nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
2728 nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
2729 nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
2730 nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
2731 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
2732 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
2733 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
2734 goto nla_put_failure;
2735
2736 if (got_ldev) {
2737 int err;
2738
2739 spin_lock_irq(&mdev->ldev->md.uuid_lock);
2740 err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2741 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2742
2743 if (err)
2744 goto nla_put_failure;
2745
2746 if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
2747 nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
2748 nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
2749 goto nla_put_failure;
2750 if (C_SYNC_SOURCE <= mdev->state.conn &&
2751 C_PAUSED_SYNC_T >= mdev->state.conn) {
2752 if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
2753 nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
2754 goto nla_put_failure;
2755 }
2756 }
2757
2758 if (sib) {
2759 switch(sib->sib_reason) {
2760 case SIB_SYNC_PROGRESS:
2761 case SIB_GET_STATUS_REPLY:
2762 break;
2763 case SIB_STATE_CHANGE:
2764 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
2765 nla_put_u32(skb, T_new_state, sib->ns.i))
2766 goto nla_put_failure;
2767 break;
2768 case SIB_HELPER_POST:
2769 if (nla_put_u32(skb, T_helper_exit_code,
2770 sib->helper_exit_code))
2771 goto nla_put_failure;
2772 /* fall through */
2773 case SIB_HELPER_PRE:
2774 if (nla_put_string(skb, T_helper, sib->helper_name))
2775 goto nla_put_failure;
2776 break;
2777 }
2778 }
2779 nla_nest_end(skb, nla);
2780
2781 if (0)
2782nla_put_failure:
2783 err = -EMSGSIZE;
2784 if (got_ldev)
2785 put_ldev(mdev);
2786 return err;
2787}
2788
2789int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2790{
2791 enum drbd_ret_code retcode;
2792 int err;
2793
2794 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2795 if (!adm_ctx.reply_skb)
2796 return retcode;
2797 if (retcode != NO_ERROR)
2798 goto out;
2799
2800 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2801 if (err) {
2802 nlmsg_free(adm_ctx.reply_skb);
2803 return err;
2804 }
2805out:
2806 drbd_adm_finish(info, retcode);
2807 return 0;
2808}
2809
2810int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2811{
2812 struct drbd_conf *mdev;
2813 struct drbd_genlmsghdr *dh;
2814 struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2815 struct drbd_tconn *tconn = NULL;
2816 struct drbd_tconn *tmp;
2817 unsigned volume = cb->args[1];
2818
2819	/* Open-coded, deferred iteration:
2820 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2821 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2822 * ...
2823 * }
2824 * }
2825 * where tconn is cb->args[0];
2826 * and i is cb->args[1];
2827 *
2828 * cb->args[2] indicates if we shall loop over all resources,
2829 * or just dump all volumes of a single resource.
2830 *
2831 * This may miss entries inserted after this dump started,
2832 * or entries deleted before they are reached.
2833 *
2834 * We need to make sure the mdev won't disappear while
2835 * we are looking at it, and revalidate our iterators
2836 * on each iteration.
2837 */
2838
2839 /* synchronize with conn_create()/conn_destroy() */
2840 rcu_read_lock();
2841 /* revalidate iterator position */
2842 list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
2843 if (pos == NULL) {
2844 /* first iteration */
2845 pos = tmp;
2846 tconn = pos;
2847 break;
2848 }
2849 if (tmp == pos) {
2850 tconn = pos;
2851 break;
2852 }
2853 }
2854 if (tconn) {
2855next_tconn:
2856 mdev = idr_get_next(&tconn->volumes, &volume);
2857 if (!mdev) {
2858 /* No more volumes to dump on this tconn.
2859 * Advance tconn iterator. */
2860 pos = list_entry_rcu(tconn->all_tconn.next,
2861 struct drbd_tconn, all_tconn);
2862 /* Did we dump any volume on this tconn yet? */
2863 if (volume != 0) {
2864 /* If we reached the end of the list,
2865 * or only a single resource dump was requested,
2866 * we are done. */
2867 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2868 goto out;
2869 volume = 0;
2870 tconn = pos;
2871 goto next_tconn;
2872 }
2873 }
2874
2875 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
2876 cb->nlh->nlmsg_seq, &drbd_genl_family,
2877 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2878 if (!dh)
2879 goto out;
2880
2881 if (!mdev) {
2882 /* This is a tconn without a single volume.
2883			 * Surprisingly enough, it may have a network
2884 * configuration. */
2885 struct net_conf *nc;
2886 dh->minor = -1U;
2887 dh->ret_code = NO_ERROR;
2888 if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
2889 goto cancel;
2890 nc = rcu_dereference(tconn->net_conf);
2891 if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2892 goto cancel;
2893 goto done;
2894 }
2895
2896 D_ASSERT(mdev->vnr == volume);
2897 D_ASSERT(mdev->tconn == tconn);
2898
2899 dh->minor = mdev_to_minor(mdev);
2900 dh->ret_code = NO_ERROR;
2901
2902 if (nla_put_status_info(skb, mdev, NULL)) {
2903cancel:
2904 genlmsg_cancel(skb, dh);
2905 goto out;
2906 }
2907done:
2908 genlmsg_end(skb, dh);
2909 }
2910
2911out:
2912 rcu_read_unlock();
2913 /* where to start the next iteration */
2914 cb->args[0] = (long)pos;
2915 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2916
2917	/* If no more tconns/volumes/minors are found, the skb stays empty,
2918	 * which will terminate the dump. */
2919 return skb->len;
2920}
2921
2922/*
2923 * Request status of all resources, or of all volumes within a single resource.
2924 *
2925 * This is a dump, as the answer may not fit in a single reply skb otherwise.
2926 * Which means we cannot use the family->attrbuf or other such members, because
2927 * dump is NOT protected by the genl_lock(). During dump, we only have access
2928 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2929 *
2930 * Once things are setup properly, we call into get_one_status().
2931 */
2932int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2933{
2934 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2935 struct nlattr *nla;
2936 const char *resource_name;
2937 struct drbd_tconn *tconn;
2938 int maxtype;
2939
2940 /* Is this a followup call? */
2941 if (cb->args[0]) {
2942 /* ... of a single resource dump,
2943 * and the resource iterator has been advanced already? */
2944 if (cb->args[2] && cb->args[2] != cb->args[0])
2945 return 0; /* DONE. */
2946 goto dump;
2947 }
2948
2949 /* First call (from netlink_dump_start). We need to figure out
2950 * which resource(s) the user wants us to dump. */
2951 nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2952 nlmsg_attrlen(cb->nlh, hdrlen),
2953 DRBD_NLA_CFG_CONTEXT);
2954
2955 /* No explicit context given. Dump all. */
2956 if (!nla)
2957 goto dump;
2958 maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2959 nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2960 if (IS_ERR(nla))
2961 return PTR_ERR(nla);
2962 /* context given, but no name present? */
2963 if (!nla)
2964 return -EINVAL;
2965 resource_name = nla_data(nla);
2966 tconn = conn_get_by_name(resource_name);
2967
2968 if (!tconn)
2969 return -ENODEV;
2970
2971 kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2972
2973 /* prime iterators, and set "filter" mode mark:
2974 * only dump this tconn. */
2975 cb->args[0] = (long)tconn;
2976 /* cb->args[1] = 0; passed in this way. */
2977 cb->args[2] = (long)tconn;
2978
2979dump:
2980 return get_one_status(skb, cb);
2981}
2982
2983int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2984{
2985 enum drbd_ret_code retcode;
2986 struct timeout_parms tp;
2987 int err;
2988
2989 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2990 if (!adm_ctx.reply_skb)
2991 return retcode;
2992 if (retcode != NO_ERROR)
2993 goto out;
2994
2995 tp.timeout_type =
2996 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2997 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2998 UT_DEFAULT;
2999
3000 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
3001 if (err) {
3002 nlmsg_free(adm_ctx.reply_skb);
3003 return err;
3004 }
3005out:
3006 drbd_adm_finish(info, retcode);
3007 return 0;
3008}
3009
3010int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
3011{
3012 struct drbd_conf *mdev;
3013 enum drbd_ret_code retcode;
3014 struct start_ov_parms parms;
3015
3016 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3017 if (!adm_ctx.reply_skb)
3018 return retcode;
3019 if (retcode != NO_ERROR)
3020 goto out;
3021
3022 mdev = adm_ctx.mdev;
3023
3024 /* resume from last known position, if possible */
3025 parms.ov_start_sector = mdev->ov_start_sector;
3026 parms.ov_stop_sector = ULLONG_MAX;
3027 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
3028 int err = start_ov_parms_from_attrs(&parms, info);
3029 if (err) {
3030 retcode = ERR_MANDATORY_TAG;
3031 drbd_msg_put_info(from_attrs_err_to_txt(err));
3032 goto out;
3033 }
3034 }
3035 /* w_make_ov_request expects position to be aligned */
3036 mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
3037 mdev->ov_stop_sector = parms.ov_stop_sector;
3038
3039 /* If there is still bitmap IO pending, e.g. previous resync or verify
3040 * just being finished, wait for it before requesting a new resync. */
3041 drbd_suspend_io(mdev);
3042 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3043	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
3044 drbd_resume_io(mdev);
3045out:
3046 drbd_adm_finish(info, retcode);
3047 return 0;
3048}
3049
3050
3051int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3052{
3053 struct drbd_conf *mdev;
3054 enum drbd_ret_code retcode;
3055 int skip_initial_sync = 0;
3056 int err;
3057 struct new_c_uuid_parms args;
3058
3059 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3060 if (!adm_ctx.reply_skb)
3061 return retcode;
3062 if (retcode != NO_ERROR)
3063 goto out_nolock;
3064
3065 mdev = adm_ctx.mdev;
3066 memset(&args, 0, sizeof(args));
3067 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
3068 err = new_c_uuid_parms_from_attrs(&args, info);
3069 if (err) {
3070 retcode = ERR_MANDATORY_TAG;
3071 drbd_msg_put_info(from_attrs_err_to_txt(err));
3072 goto out_nolock;
3073 }
3074 }
3075
3076 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
3077
3078 if (!get_ldev(mdev)) {
3079 retcode = ERR_NO_DISK;
3080 goto out;
3081 }
3082
3083	/* this is "skip initial sync", assume it to be clean */
3084 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
3085 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3086 dev_info(DEV, "Preparing to skip initial sync\n");
3087 skip_initial_sync = 1;
3088 } else if (mdev->state.conn != C_STANDALONE) {
3089 retcode = ERR_CONNECTED;
3090 goto out_dec;
3091 }
3092
3093 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3094 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
3095
3096 if (args.clear_bm) {
3097 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3098 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
3099 if (err) {
3100 dev_err(DEV, "Writing bitmap failed with %d\n",err);
3101 retcode = ERR_IO_MD_DISK;
3102 }
3103 if (skip_initial_sync) {
3104 drbd_send_uuids_skip_initial_sync(mdev);
3105 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3106 drbd_print_uuids(mdev, "cleared bitmap UUID");
3107 spin_lock_irq(&mdev->tconn->req_lock);
3108 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3109 CS_VERBOSE, NULL);
3110 spin_unlock_irq(&mdev->tconn->req_lock);
3111 }
3112 }
3113
3114 drbd_md_sync(mdev);
3115out_dec:
3116 put_ldev(mdev);
3117out:
3118 mutex_unlock(mdev->state_mutex);
3119out_nolock:
3120 drbd_adm_finish(info, retcode);
3121 return 0;
3122}
3123
3124static enum drbd_ret_code
3125drbd_check_resource_name(const char *name)
3126{
3127 if (!name || !name[0]) {
3128 drbd_msg_put_info("resource name missing");
3129 return ERR_MANDATORY_TAG;
3130 }
3131 /* if we want to use these in sysfs/configfs/debugfs some day,
3132 * we must not allow slashes */
3133 if (strchr(name, '/')) {
3134 drbd_msg_put_info("invalid resource name");
3135 return ERR_INVALID_REQUEST;
3136 }
3137 return NO_ERROR;
3138}
3139
3140int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
3141{
3142 enum drbd_ret_code retcode;
3143 struct res_opts res_opts;
3144 int err;
3145
3146 retcode = drbd_adm_prepare(skb, info, 0);
3147 if (!adm_ctx.reply_skb)
3148 return retcode;
3149 if (retcode != NO_ERROR)
3150 goto out;
3151
3152 set_res_opts_defaults(&res_opts);
3153 err = res_opts_from_attrs(&res_opts, info);
3154 if (err && err != -ENOMSG) {
3155 retcode = ERR_MANDATORY_TAG;
3156 drbd_msg_put_info(from_attrs_err_to_txt(err));
3157 goto out;
3158 }
3159
3160 retcode = drbd_check_resource_name(adm_ctx.resource_name);
3161 if (retcode != NO_ERROR)
3162 goto out;
3163
3164 if (adm_ctx.tconn) {
3165 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3166 retcode = ERR_INVALID_REQUEST;
3167 drbd_msg_put_info("resource exists");
3168 }
3169 /* else: still NO_ERROR */
3170 goto out;
3171 }
3172
3173 if (!conn_create(adm_ctx.resource_name, &res_opts))
3174 retcode = ERR_NOMEM;
3175out:
3176 drbd_adm_finish(info, retcode);
3177 return 0;
3178}
3179
3180int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3181{
3182 struct drbd_genlmsghdr *dh = info->userhdr;
3183 enum drbd_ret_code retcode;
3184
3185 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
3186 if (!adm_ctx.reply_skb)
3187 return retcode;
3188 if (retcode != NO_ERROR)
3189 goto out;
3190
3191 if (dh->minor > MINORMASK) {
3192 drbd_msg_put_info("requested minor out of range");
3193 retcode = ERR_INVALID_REQUEST;
3194 goto out;
3195 }
3196 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
3197 drbd_msg_put_info("requested volume id out of range");
3198 retcode = ERR_INVALID_REQUEST;
3199 goto out;
3200 }
3201
3202 /* drbd_adm_prepare made sure already
3203 * that mdev->tconn and mdev->vnr match the request. */
3204 if (adm_ctx.mdev) {
3205 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3206 retcode = ERR_MINOR_EXISTS;
3207 /* else: still NO_ERROR */
3208 goto out;
3209 }
3210
3211 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3212out:
3213 drbd_adm_finish(info, retcode);
3214 return 0;
3215}
3216
3217static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3218{
3219 if (mdev->state.disk == D_DISKLESS &&
3220 /* no need to be mdev->state.conn == C_STANDALONE &&
3221 * we may want to delete a minor from a live replication group.
3222 */
3223 mdev->state.role == R_SECONDARY) {
3224 _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
3225 CS_VERBOSE + CS_WAIT_COMPLETE);
3226 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3227 idr_remove(&minors, mdev_to_minor(mdev));
3228 destroy_workqueue(mdev->submit.wq);
3229 del_gendisk(mdev->vdisk);
3230 synchronize_rcu();
3231 kref_put(&mdev->kref, &drbd_minor_destroy);
3232 return NO_ERROR;
3233 } else
3234 return ERR_MINOR_CONFIGURED;
3235}
3236
3237int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
3238{
3239 enum drbd_ret_code retcode;
3240
3241 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3242 if (!adm_ctx.reply_skb)
3243 return retcode;
3244 if (retcode != NO_ERROR)
3245 goto out;
3246
3247 retcode = adm_delete_minor(adm_ctx.mdev);
3248out:
3249 drbd_adm_finish(info, retcode);
3250 return 0;
3251}
3252
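/* Take a whole resource down: demote all volumes, disconnect, detach, then
 * delete the volumes and finally the connection itself. */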
3253int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3254{
3255 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3256 struct drbd_conf *mdev;
3257 unsigned i;
3258
3259 retcode = drbd_adm_prepare(skb, info, 0);
3260 if (!adm_ctx.reply_skb)
3261 return retcode;
3262 if (retcode != NO_ERROR)
3263 goto out;
3264
3265 if (!adm_ctx.tconn) {
3266 retcode = ERR_RES_NOT_KNOWN;
3267 goto out;
3268 }
3269
3270 /* demote */
3271 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3272 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3273 if (retcode < SS_SUCCESS) {
3274 drbd_msg_put_info("failed to demote");
3275 goto out;
3276 }
3277 }
3278
3279 retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3280 if (retcode < SS_SUCCESS) {
3281 drbd_msg_put_info("failed to disconnect");
3282 goto out;
3283 }
3284
3285 /* detach */
3286 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3287 retcode = adm_detach(mdev, 0);
3288 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
3289 drbd_msg_put_info("failed to detach");
3290 goto out;
3291 }
3292 }
3293
3294 /* If we reach this, all volumes (of this tconn) are Secondary,
3295 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
3296	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
3297 drbd_thread_stop(&adm_ctx.tconn->worker);
3298
3299 /* Now, nothing can fail anymore */
3300
3301 /* delete volumes */
3302 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3303 retcode = adm_delete_minor(mdev);
3304 if (retcode != NO_ERROR) {
3305 /* "can not happen" */
3306 drbd_msg_put_info("failed to delete volume");
3307 goto out;
3308 }
3309 }
3310
3311 /* delete connection */
3312 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3313 list_del_rcu(&adm_ctx.tconn->all_tconn);
3314 synchronize_rcu();
3315 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3316
3317 retcode = NO_ERROR;
3318 } else {
3319 /* "can not happen" */
3320 retcode = ERR_RES_IN_USE;
3321 drbd_msg_put_info("failed to delete connection");
3322 }
3323 goto out;
3324out:
3325 drbd_adm_finish(info, retcode);
3326 return 0;
3327}
3328
3329int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3330{
3331 enum drbd_ret_code retcode;
3332
3333 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
3334 if (!adm_ctx.reply_skb)
3335 return retcode;
3336 if (retcode != NO_ERROR)
3337 goto out;
3338
3339 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3340 list_del_rcu(&adm_ctx.tconn->all_tconn);
3341 synchronize_rcu();
3342 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3343
3344 retcode = NO_ERROR;
3345 } else {
3346 retcode = ERR_RES_IN_USE;
3347 }
3348
3349 if (retcode == NO_ERROR)
3350 drbd_thread_stop(&adm_ctx.tconn->worker);
3351out:
3352 drbd_adm_finish(info, retcode);
3353 return 0;
3354}
3355
3356void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3357{
3358 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3359 struct sk_buff *msg;
3360 struct drbd_genlmsghdr *d_out;
3361 unsigned seq;
3362 int err = -ENOMEM;
3363
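	/* Rate-limit resync progress broadcasts to roughly one per second. */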
3364 if (sib->sib_reason == SIB_SYNC_PROGRESS) {
3365 if (time_after(jiffies, mdev->rs_last_bcast + HZ))
3366 mdev->rs_last_bcast = jiffies;
3367 else
3368 return;
3369 }
3370
3371 seq = atomic_inc_return(&drbd_genl_seq);
3372 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3373 if (!msg)
3374 goto failed;
3375
3376 err = -EMSGSIZE;
3377 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3378	if (!d_out) /* cannot happen, but anyway. */
3379 goto nla_put_failure;
3380 d_out->minor = mdev_to_minor(mdev);
3381 d_out->ret_code = NO_ERROR;
3382
3383 if (nla_put_status_info(msg, mdev, sib))
3384 goto nla_put_failure;
3385 genlmsg_end(msg, d_out);
3386 err = drbd_genl_multicast_events(msg, 0);
3387 /* msg has been consumed or freed in netlink_broadcast() */
3388 if (err && err != -ESRCH)
3389 goto failed;
3390
3391 return;
3392
3393nla_put_failure:
3394 nlmsg_free(msg);
3395failed:
3396 dev_err(DEV, "Error %d while broadcasting event. "
3397 "Event seq:%u sib_reason:%u\n",
3398 err, seq, sib->sib_reason);
3399}