drbd: Ignore the exit code of a fence-peer handler if it returns too late
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always
	 * succeed, but check the result anyway */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_tconn *tconn = mdev->tconn;
	struct sib_info sib;
	int ret;

	if (current == tconn->worker.task)
		set_bit(CALLBACK_PENDING, &tconn->flags);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (current == tconn->worker.task)
		clear_bit(CALLBACK_PENDING, &tconn->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
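
/*
 * Illustrative sketch (not part of the original file): what a userspace
 * helper invoked by drbd_khelper() above might look like.  The helper is
 * called as "<usermode_helper> <cmd> minor-<n>", with DRBD_PEER_AF and
 * DRBD_PEER_ADDRESS placed in the environment by setup_khelper_env().
 * The exit code travels back in the wait status, which is why the logging
 * above extracts it as (ret >> 8) & 0xff.  All names below are
 * hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *cmd  = argc > 1 ? argv[1] : "?";	/* e.g. "fence-peer" */
	const char *peer = getenv("DRBD_PEER_ADDRESS");	/* set by the kernel */

	fprintf(stderr, "helper: cmd=%s peer=%s\n", cmd, peer ? peer : "n/a");
	/* For "fence-peer", the exit code selects a case in
	 * conn_try_outdate_peer() below; 7 means "peer was stonithed". */
	return 7;
}
#endif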

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	spin_lock_irq(&tconn->req_lock);
	connect_cnt = tconn->connect_cnt;
	spin_unlock_irq(&tconn->req_lock);

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
		if (tconn->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			conn_info(tconn, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(tconn, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
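
/*
 * Why the connect_cnt check above matters -- a sketch of the race that the
 * "Ignoring fence-peer exit code" branch guards against (this is the
 * scenario the title at the top of this excerpt refers to):
 *
 *   t0: connection lost; connect_cnt == N is sampled under req_lock,
 *       the fence-peer helper is invoked
 *   t1: the peer reconnects          -> connect_cnt becomes N+1
 *   t2: the connection drops again   -> cstate < C_WF_REPORT_PARAMS
 *   t3: the helper finally exits, claiming the peer is dead
 *
 * At t3 the exit code describes the peer as it was before t1; applying it
 * would wrongly outdate a peer that may have resynced in between.  Since
 * tconn->connect_cnt no longer equals the sampled value, the exit code is
 * ignored instead of being turned into a state change.
 */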

static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
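
/*
 * drbd_adm_set_role() above shows the skeleton shared by the .doit
 * handlers in this file: drbd_adm_prepare() resolves the request into the
 * global adm_ctx, the actual work runs against adm_ctx.mdev (or
 * adm_ctx.tconn), and drbd_adm_finish() sends the reply.  A minimal sketch
 * of that pattern (handler and worker names are hypothetical):
 */
#if 0
int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;		/* no reply skb could be allocated */
	if (retcode == NO_ERROR)
		retcode = do_example_work(adm_ctx.mdev);	/* hypothetical */
	drbd_adm_finish(info, retcode);
	return 0;
}
#endif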

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
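
/*
 * Worked example for the internal meta data branch above, assuming the
 * usual 4 kB-per-bit bitmap granularity (one 512-byte bitmap sector then
 * describes one 16 MiB "bitmap extent", i.e. BM_SECT_PER_EXT == 32768):
 *
 *   backing device: 1 TiB = 2147483648 sectors
 *   bitmap sectors needed ~= 2147483648 / 32768 = 65536   (32 MiB bitmap)
 *   with a 32 kB activity log (al_size_sect == 64) and the 4 kB superblock:
 *     md_size_sect = 65536 + 8 + 64 = 65608
 *     al_offset    = -64
 *     bm_offset    = -65608 + 8   = -65600
 *
 * All offsets are relative to md_offset, which for internal meta data sits
 * at the end of the backing device.
 */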

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
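
/*
 * Examples of the shift-and-round above (input in KB, rounding on bit 9):
 *
 *   ppsize(buf, 5000)  -> "5000 KB"
 *   ppsize(buf, 25000) -> "24 MB"     (25000 >> 10 == 24, no round-up)
 *   ppsize(buf, 25100) -> "25 MB"     (bit 9 set, rounds up)
 *   ppsize(buf, -1ULL) -> "16384 EB"  (the 9-byte worst case noted above)
 */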

/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size_sect, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size_sect = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
				     "size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size_sect)
		rv = grew;
	if (size < la_size_sect)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
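
/*
 * Sketch of the caller contract of drbd_determine_dev_size() (see its
 * kernel-doc above; the RESYNC_AFTER_NEG handling mirrors what the attach
 * path below does -- the surrounding code here is illustrative only):
 */
#if 0
	if (get_ldev(mdev)) {
		enum determine_dev_size dd = drbd_determine_dev_size(mdev, 0);
		drbd_md_sync(mdev);	/* "call drbd_md_sync() after" */
		if (dd == grew)
			set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		put_ldev(mdev);
	}
#endif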

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
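
/*
 * Worked example of the precedence above: a connected peer reporting
 * p_size = 40 GiB and a local backing device with m_size = 50 GiB yield
 * size = min(40 GiB, 50 GiB) = 40 GiB.  Without a connection, the last
 * agreed size (la_size_sect) wins, again clipped to whatever both disks
 * allow.  A user-requested u_size can only shrink the result; a request
 * larger than the computed size merely logs an error and is ignored.
 */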

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
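
/*
 * The lru swap above only succeeds while no AL extent is referenced, so
 * callers quiesce the activity log first.  This is the pattern used by
 * drbd_adm_disk_opts() further down in this file:
 */
#if 0
	drbd_suspend_io(mdev);
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);		/* all extents inactive */
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);
#endif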

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min(local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
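
/*
 * Worked example of the negotiation above: a local disk that could take
 * 1 MiB bios, connected to a peer still running 8.3.7 (protocol < 94),
 * ends up with new = min(1 MiB, DRBD_MAX_SIZE_H80_PACKET), i.e. 32 KiB.
 * Against a peer speaking protocol >= 100 the same disk negotiates
 * min(1 MiB, DRBD_MAX_BIO_SIZE) instead.  Shrinking the size while
 * Primary is unexpected and is flagged by the ASSERT message above.
 */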

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slot numbers per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION - 1)
		/ AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
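
/*
 * Worked arithmetic for the cap above, assuming DRBD_AL_EXTENTS_MAX is
 * 65534 (0xfffe, since (u16)~0 is reserved) and AL_CONTEXT_PER_TRANSACTION
 * is the 919 context slots mentioned in the comment:
 * sufficient_on_disk = (65534 + 918) / 919 = 72 -- hence the "72
 * transaction blocks" remark.  A default 32 kB activity log has
 * al_size_4k == 8, well below that, so it allows at most
 * (8 - 1) * 919 = 6433 al-extents.
 */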

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
		new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	drbd_suspend_io(mdev);
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);

	if (new_disk_conf->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&mdev->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &mdev->flags);
	clear_bit(WAS_IO_ERROR, &mdev->flags);
	clear_bit(WAS_READ_ERROR, &mdev->flags);

	/* and no leftover from previously aborted resync or verify, either */
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	spin_lock_init(&nbc->md.uuid_lock);

	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	write_unlock_irq(&global_state_lock);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (new_disk_conf->meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* Read our meta data super block early.
	 * This also sets other on-disk offsets. */
	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto fail;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
		new_disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	mdev->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;
4b0007c0 1583 drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
b411b363
PR
1584
1585 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1586 set_bit(CRASHED_PRIMARY, &mdev->flags);
1587 else
1588 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1589
894c6a94 1590 if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
d5d7ebd4 1591 !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
b411b363 1592 set_bit(CRASHED_PRIMARY, &mdev->flags);
b411b363
PR
1593
1594 mdev->send_cnt = 0;
1595 mdev->recv_cnt = 0;
1596 mdev->read_cnt = 0;
1597 mdev->writ_cnt = 0;
1598
99432fcc 1599 drbd_reconsider_max_bio_size(mdev);
b411b363
PR
1600
1601 /* If I am currently not R_PRIMARY,
1602 * but meta data primary indicator is set,
1603 * I just now recover from a hard crash,
1604 * and have been R_PRIMARY before that crash.
1605 *
1606 * Now, if I had no connection before that crash
1607 * (have been degraded R_PRIMARY), chances are that
1608 * I won't find my peer now either.
1609 *
1610 * In that case, and _only_ in that case,
1611 * we use the degr-wfc-timeout instead of the default,
1612 * so we can automatically recover from a crash of a
1613 * degraded but active "cluster" after a certain timeout.
1614 */
1615 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1616 if (mdev->state.role != R_PRIMARY &&
1617 drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1618 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1619 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1620
24c4830c 1621 dd = drbd_determine_dev_size(mdev, 0);
b411b363
PR
1622 if (dd == dev_size_error) {
1623 retcode = ERR_NOMEM_BITMAP;
1624 goto force_diskless_dec;
1625 } else if (dd == grew)
1626 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1627
9a51ab1c
PR
1628 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1629 (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1630 drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
b411b363
PR
1631 dev_info(DEV, "Assuming that all blocks are out of sync "
1632 "(aka FullSync)\n");
20ceb2b2
LE
1633 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1634 "set_n_write from attaching", BM_LOCKED_MASK)) {
b411b363
PR
1635 retcode = ERR_IO_MD_DISK;
1636 goto force_diskless_dec;
1637 }
1638 } else {
20ceb2b2 1639 if (drbd_bitmap_io(mdev, &drbd_bm_read,
22ab6a30 1640 "read from attaching", BM_LOCKED_MASK)) {
19f843aa
LE
1641 retcode = ERR_IO_MD_DISK;
1642 goto force_diskless_dec;
1643 }
b411b363
PR
1644 }
1645
0778286a
PR
1646 if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1647 drbd_suspend_al(mdev); /* IO is still suspended here... */
1648
87eeee41 1649 spin_lock_irq(&mdev->tconn->req_lock);
78bae59b
PR
1650 os = drbd_read_state(mdev);
1651 ns = os;
b411b363
PR
1652 /* If MDF_CONSISTENT is not set go into inconsistent state,
1653 otherwise investigate MDF_WasUpToDate...
1654 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1655 otherwise into D_CONSISTENT state.
1656 */
1657 if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1658 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1659 ns.disk = D_CONSISTENT;
1660 else
1661 ns.disk = D_OUTDATED;
1662 } else {
1663 ns.disk = D_INCONSISTENT;
1664 }
1665
1666 if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1667 ns.pdsk = D_OUTDATED;
1668
daeda1cc
PR
1669 rcu_read_lock();
1670 if (ns.disk == D_CONSISTENT &&
1671 (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
b411b363
PR
1672 ns.disk = D_UP_TO_DATE;
1673
1674 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1675 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1676 this point, because drbd_request_state() modifies these
1677 flags. */
1678
9a51ab1c 1679 if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
4035e4c2 1680 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
9a51ab1c
PR
1681 else
1682 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1683
1684 rcu_read_unlock();
1685
b411b363
PR
1686 /* In case we are C_CONNECTED postpone any decision on the new disk
1687 state after the negotiation phase. */
1688 if (mdev->state.conn == C_CONNECTED) {
1689 mdev->new_state_tmp.i = ns.i;
1690 ns.i = os.i;
1691 ns.disk = D_NEGOTIATING;
dc66c74d
PR
1692
1693 /* We expect to receive up-to-date UUIDs soon.
1694 To avoid a race in receive_state, free p_uuid while
1695 holding req_lock. I.e. atomic with the state change */
1696 kfree(mdev->p_uuid);
1697 mdev->p_uuid = NULL;
b411b363
PR
1698 }
1699
1700 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
87eeee41 1701 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
1702
1703 if (rv < SS_SUCCESS)
1704 goto force_diskless_dec;
1705
cdfda633
PR
1706 mod_timer(&mdev->request_timer, jiffies + HZ);
1707
b411b363
PR
1708 if (mdev->state.role == R_PRIMARY)
1709 mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
1710 else
1711 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1712
1713 drbd_md_mark_dirty(mdev);
1714 drbd_md_sync(mdev);
1715
1716 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1717 put_ldev(mdev);
0e29d163 1718 conn_reconfig_done(mdev->tconn);
3b98c0c2 1719 drbd_adm_finish(info, retcode);
b411b363
PR
1720 return 0;
1721
1722 force_diskless_dec:
1723 put_ldev(mdev);
1724 force_diskless:
9510b241 1725 drbd_force_state(mdev, NS(disk, D_DISKLESS));
b411b363 1726 drbd_md_sync(mdev);
b411b363 1727 fail:
40cbf085 1728 conn_reconfig_done(mdev->tconn);
b411b363 1729 if (nbc) {
e525fd89
TH
1730 if (nbc->backing_bdev)
1731 blkdev_put(nbc->backing_bdev,
1732 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1733 if (nbc->md_bdev)
1734 blkdev_put(nbc->md_bdev,
1735 FMODE_READ | FMODE_WRITE | FMODE_EXCL);
b411b363
PR
1736 kfree(nbc);
1737 }
daeda1cc 1738 kfree(new_disk_conf);
b411b363 1739 lc_destroy(resync_lru);
9958c857 1740 kfree(new_plan);
b411b363 1741
40cbf085 1742 finish:
3b98c0c2 1743 drbd_adm_finish(info, retcode);
b411b363
PR
1744 return 0;
1745}
1746
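/* Helper used by drbd_adm_detach() and drbd_adm_down(): bring the disk to
 * D_FAILED and wait until it has left that state again.  With force set,
 * the graceful path is skipped and the state change is forced. */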
static int adm_detach(struct drbd_conf *mdev, int force)
{
	enum drbd_state_rv retcode;
	int ret;

	if (force) {
		set_bit(FORCE_DETACH, &mdev->flags);
		drbd_force_state(mdev, NS(disk, D_FAILED));
		retcode = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	drbd_md_put_buffer(mdev);
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
out:
	return retcode;
}

/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

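/* Returns true if any volume of this connection is currently a resync
 * source or target, including the paused variants. */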
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

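/* Returns true if any volume of this connection is currently running
 * online verify. */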
static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

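/* Validate a new net_conf against the connection state and, if present,
 * the configuration it is about to replace.  Returns NO_ERROR or the
 * error code of the first violated constraint. */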
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
		if (new_conf->wire_protocol != old_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_conf->two_primaries != old_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_conf->two_primaries &&
	    conn_highest_role(tconn) == R_PRIMARY &&
	    conn_highest_peer(tconn) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

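/* Wrapper around _check_net_options() that holds the RCU read lock while
 * looking at the current net_conf, and makes sure every volume has its
 * bitmap allocated. */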
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}

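/* Bundle of all hash transforms a net_conf may reference.  They are
 * allocated up front so that a configuration change can fail cleanly
 * before anything is swapped in. */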
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

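/* Allocate all hash transforms requested by new_conf; on error the caller
 * is expected to clean up with free_crypto(). */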
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}

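/* Netlink handler: change the net options of an existing connection.
 * Algorithms currently in use (csums-alg during a resync, verify-alg
 * during online verify) must not be changed. */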
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
		/* Do this without trying to take tconn->data.mutex again. */
		__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

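/* Netlink handler: set up the network configuration of a resource and
 * request the C_UNCONNECTED state.  Endpoint address pairs already used
 * by another connection are rejected. */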
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *tconn;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);

	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info("connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
		if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
		    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
			retcode = ERR_LOCAL_ADDR;
			goto out;
		}

		if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
		    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
			retcode = ERR_PEER_ADDR;
			goto out;
		}
	}

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	tconn->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
	tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

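/* Try to bring the connection down gracefully.  Depending on the state
 * engine's verdict this may require outdating our own or the peer's disk
 * first, or falling back to a forced (CS_HARD) transition. */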
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);

		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);

		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker. This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart. We may have "killed" the receiver thread just
		 * after drbdd_init() returned. Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

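/* After an online grow, resync the new area; the primary (or, between two
 * secondaries, the side holding RESOLVE_CONFLICTS) becomes sync source. */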
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

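/* Netlink handler: resize the device, updating the user-requested size in
 * disk_conf if it changed, and announce the new size to the peer. */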
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	err = set_resource_options(tconn, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

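/* Netlink handler: discard the local data and do a full resync from the
 * peer; while C_STANDALONE R_SECONDARY, only mark the disk inconsistent
 * and set all bitmap bits. */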
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
	 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
	 * try to start a resync handshake as sync target for full sync.
	 */
	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
		retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
				"set_n_write from invalidate", BM_LOCKED_MASK))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

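/* Bitmap IO helper: set all bits and additionally suspend the activity
 * log; used by drbd_adm_invalidate_peer() below. */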
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

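/* Netlink handler: the counterpart of invalidate; declare the peer's data
 * out of date and become sync source for a full resync. */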
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_conf *mdev;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
	 * in the bitmap. Otherwise, try to start a resync handshake
	 * as sync source for full sync.
	 */
	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyways. Just make that
		   into a full resync. */
		retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
				"set_n_write from invalidate_peer",
				BM_LOCKED_SET_ALLOWED))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

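/* Emit the DRBD_NLA_CFG_CONTEXT nest (volume number, resource name and,
 * if configured, both endpoint addresses) into a netlink message. */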
int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED &&
	    nla_put_u32(skb, T_ctx_volume, vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
		goto nla_put_failure;
	if (tconn->my_addr_len &&
	    nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
		goto nla_put_failure;
	if (tconn->peer_addr_len &&
	    nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev) {
		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(mdev->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
	}
	if (!err) {
		struct net_conf *nc;

		nc = rcu_dereference(mdev->tconn->net_conf);
		if (nc)
			err = net_conf_to_skb(skb, nc, exclude_sensitive);
	}
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, mdev->state.i) ||
	    nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
	    nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
	    nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
	    nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
	    nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
	    nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
	    nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
	    nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&mdev->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		spin_unlock_irq(&mdev->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
			    nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch(sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

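/* Dump callback: emits the status of one volume (or of a volume-less
 * tconn) per invocation; the iterator state lives in cb->args[]. */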
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct net_conf *nc;
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				goto cancel;
			nc = rcu_dereference(tconn->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)
				goto cancel;
			goto done;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

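/* Netlink handler: report which wait-for-connection timeout applies to
 * this minor (default, degraded, or peer-outdated). */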
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

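/* Netlink handler: start online verify, resuming from the last known
 * position unless explicit start/stop sectors were supplied. */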
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* resume from last known position, if possible */
	parms.ov_start_sector = mdev->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	mdev->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

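/* Netlink handler: generate a new current UUID; with clear_bm set, the
 * bitmap is cleared as well, which implements "skip initial sync". */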
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

3b98c0c2 3180int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
b411b363 3181{
3b98c0c2
LE
3182 struct drbd_genlmsghdr *dh = info->userhdr;
3183 enum drbd_ret_code retcode;
b411b363 3184
44e52cfa 3185 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
3b98c0c2
LE
3186 if (!adm_ctx.reply_skb)
3187 return retcode;
3188 if (retcode != NO_ERROR)
3189 goto out;
b411b363 3190
f2257a56 3191 if (dh->minor > MINORMASK) {
3b98c0c2
LE
3192 drbd_msg_put_info("requested minor out of range");
3193 retcode = ERR_INVALID_REQUEST;
3194 goto out;
b411b363 3195 }
0c8e36d9 3196 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
3b98c0c2
LE
3197 drbd_msg_put_info("requested volume id out of range");
3198 retcode = ERR_INVALID_REQUEST;
3199 goto out;
b411b363 3200 }
b411b363 3201
38f19616
LE
3202 /* drbd_adm_prepare made sure already
3203 * that mdev->tconn and mdev->vnr match the request. */
3204 if (adm_ctx.mdev) {
3205 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3206 retcode = ERR_MINOR_EXISTS;
3207 /* else: still NO_ERROR */
3208 goto out;
b411b363 3209 }
38f19616 3210
3b98c0c2
LE
3211 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3212out:
3213 drbd_adm_finish(info, retcode);
3214 return 0;
b411b363
PR
3215}
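
/*
 * Adding a minor follows the same create-or-confirm convention as
 * drbd_adm_new_resource() above: an already configured minor is only an
 * error (ERR_MINOR_EXISTS) when the request carried NLM_F_EXCL.
 */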

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to require mdev->state.conn == C_STANDALONE as well;
	     * we may want to delete a minor from a live replication group. */
	    mdev->state.role == R_SECONDARY) {
		_drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		destroy_workqueue(mdev->submit.wq);
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
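
/*
 * Teardown ordering in adm_delete_minor() follows the usual RCU pattern:
 * unpublish first (idr_remove() from both the per-connection volumes idr
 * and the global minors idr, so no new lookup can find the device), then
 * synchronize_rcu() to wait out lookups already in flight under
 * rcu_read_lock(), and only then drop the final reference.  Generic shape:
 *
 *	idr_remove(&some_idr, id);	unpublish
 *	synchronize_rcu();		wait for in-flight readers
 *	kref_put(&obj->kref, release);	now safe to free
 */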

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* holds an enum drbd_ret_code or an enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "cannot happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "cannot happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
	goto out;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
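
/*
 * The "down" sequence above is strictly ordered: demote every volume to
 * Secondary, disconnect from the peer, detach the backing devices, stop
 * the worker thread, and only then delete the (now Unconfigured) volumes
 * and finally the connection itself.  Failures are only expected up to
 * the disconnect/detach steps; once the worker is stopped, the deletions
 * are supposed to succeed, hence the "cannot happen" error paths.
 */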

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	/* Sync-progress events are rate limited to at most one per second. */
	if (sib->sib_reason == SIB_SYNC_PROGRESS) {
		if (time_after(jiffies, mdev->rs_last_bcast + HZ))
			mdev->rs_last_bcast = jiffies;
		else
			return;
	}

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}
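
/*
 * The SIB_SYNC_PROGRESS throttle in drbd_bcast_event() is the classic
 * "at most once per interval" pattern on jiffies.  A userspace sketch of
 * the same idea against CLOCK_MONOTONIC (hypothetical helper names, not
 * part of the driver, never built here):
 */
#if 0
#include <stdbool.h>
#include <time.h>

/* Return true at most once per second; callers drop the event otherwise. */
static bool may_broadcast(struct timespec *last)
{
	struct timespec now;
	long long elapsed_ns;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_ns = (now.tv_sec - last->tv_sec) * 1000000000LL
		   + (now.tv_nsec - last->tv_nsec);
	if (elapsed_ns < 1000000000LL)
		return false;		/* suppressed, like the early return */
	*last = now;			/* like mdev->rs_last_bcast = jiffies */
	return true;
}

int main(void)
{
	struct timespec last = { 0, 0 };
	int i, sent = 0;

	for (i = 0; i < 5; i++)
		sent += may_broadcast(&last);
	/* Only the first of the five back-to-back events gets through. */
	return sent == 1 ? 0 : 1;
}
#endif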