drbd: factor out drbd_rs_number_requests
drivers/block/drbd/drbd_receiver.c
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

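/* GFP_TRY: opportunistic allocation for payload pages.  __GFP_HIGHMEM
 * is fine because the pages are only ever touched via kmap(), and
 * __GFP_NOWARN because failure is expected and handled by falling back
 * to the page pool or retrying (see drbd_pp_alloc() below).  The lack
 * of __GFP_WAIT keeps the attempt from triggering write-out. */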
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}
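
/* Illustration: with *head pointing at the chain p1 -> p2 -> p3 -> 0
 * (links stored in page->private), page_chain_del(&head, 2) returns
 * p1 -> p2 -> 0 and leaves *head pointing at p3. */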

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

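/* An epoch entry ("EE", struct drbd_epoch_entry) represents a request
 * received from the peer: it owns a page chain for the payload, an
 * optional digest, and a work callback (w.cb) that is run, typically
 * from the asender thread, once the local disk I/O has completed. */
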
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept, which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return FALSE;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return TRUE;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return FALSE;
	}
}

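/* DRBD uses two TCP connections per peer: a bulk "data" socket (sock)
 * and a low-latency "meta" socket (msock) for ACKs and pings.  Both
 * nodes connect and accept simultaneously; the initial P_HAND_SHAKE_S
 * or P_HAND_SHAKE_M packet tells the accepting side which of the two
 * roles a freshly established connection is meant to take. */
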
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return FALSE;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return FALSE;
	}
	mdev->last_received = jiffies;

	return TRUE;
}

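/* Two wire header formats coexist: the original p_header80 with a
 * 16-bit length field, and p_header95 (tagged with BE_DRBD_MAGIC_BIG,
 * protocol 95 and up) whose 32-bit length field allows payloads larger
 * than 64 KiB within a single packet. */
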
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

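/* An epoch can only be retired once all three conditions hold: it has
 * seen at least one write (epoch_size != 0), none of its writes are
 * still in flight (active == 0), and its barrier number is known
 * (DE_HAVE_BARRIER_NUMBER).  Retiring sends the P_BARRIER_ACK, unless
 * we are merely cleaning up on disconnect (EV_CLEANUP). */
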
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Create and submit the bios for an epoch entry
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}

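/* P_BARRIER separates two write epochs.  Depending on the configured
 * write ordering, receive_Barrier() below either just opens a new
 * epoch object (WO_none), or in addition drains the active list and
 * flushes the backing device (WO_drain_io / WO_bdev_flush) before the
 * barrier may be acknowledged. */
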
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return TRUE;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return TRUE;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return FALSE;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return TRUE;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				 rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED.\n");
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return TRUE;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
				 rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

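/* A "dless" (presumably "disk-less") read reply carries the payload of
 * a READ that the peer served for us; it is copied straight into the
 * bio of the original application request instead of into an epoch
 * entry. */
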
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return TRUE;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return FALSE;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return FALSE;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

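/* In dual-primary setups, truly concurrent writes to the same area are
 * broken deterministically: the node that got DISCARD_CONCURRENT set
 * during connect (the side whose msock resulted from accept()) drops
 * the peer's conflicting write without submitting it and answers with
 * P_DISCARD_ACK via e_send_discard_ack() above. */
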
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
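/* A sketch of the sequence comparison used below (seq_le() itself is a
 * DRBD helper defined elsewhere; this is the usual modulo-2^32 idiom,
 * shown here only for illustration):
 *
 *	static inline int seq_le(unsigned int a, unsigned int b)
 *	{
 *		return (int)(a - b) <= 0;
 *	}
 *
 * The unsigned subtraction keeps the comparison correct across the
 * 32bit wrap around discussed above. */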
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;
	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}

static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	if (mdev->agreed_pro_version >= 95)
		return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
			(dpf & DP_FUA ? REQ_FUA : 0) |
			(dpf & DP_FLUSH ? REQ_FUA : 0) |
			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
	else
		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
}

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write mirrored data block "
			    "to local disk.\n");
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}

	e->w.cb = e_end_block;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= write_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return TRUE;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return TRUE;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return FALSE;
}

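/* Wire protocol ACK semantics as handled above: protocol A sends no
 * ACK at all, protocol B acknowledges mere receipt with P_RECV_ACK,
 * and protocol C acknowledges only after the write has reached stable
 * storage (P_WRITE_ACK, sent from e_end_block()). */
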
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID's is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
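/* Worked example: suppose the backing device reports 10000 sectors of
 * combined read/write activity since the last check, of which resync
 * submissions (rs_sect_ev) account for 9000.  The unaccounted-for 1000
 * sectors exceed the 64-sector threshold, so application I/O is
 * assumed to be present; if the short-term resync rate then also
 * exceeds c_min_rate (in KB/s), the resync is asked to throttle. */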
1864int drbd_rs_should_slow_down(struct drbd_conf *mdev)
1865{
1866 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
1867 unsigned long db, dt, dbdt;
1868 int curr_events;
1869 int throttle = 0;
1870
1871 /* feature disabled? */
1872 if (mdev->sync_conf.c_min_rate == 0)
1873 return 0;
1874
1875 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
1876 (int)part_stat_read(&disk->part0, sectors[1]) -
1877 atomic_read(&mdev->rs_sect_ev);
1878 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
1879 unsigned long rs_left;
1880 int i;
1881
1882 mdev->rs_last_events = curr_events;
1883
1884 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
1885 * approx. */
1886 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
1887 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
1888
1889 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
1890 if (!dt)
1891 dt++;
1892 db = mdev->rs_mark_left[i] - rs_left;
1893 dbdt = Bit2KB(db/dt);
1894
1895 if (dbdt > mdev->sync_conf.c_min_rate)
1896 throttle = 1;
1897 }
1898 return throttle;
1899}
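
/* Editorial note, not part of drbd_receiver.c: a minimal sketch of the
 * rate arithmetic above with hypothetical numbers, assuming Bit2KB(x)
 * expands to (x << 2), i.e. one bitmap bit covers a 4 KiB block. */
static inline unsigned long example_dbdt(unsigned long mark_left,
					 unsigned long rs_left,
					 unsigned long dt_seconds)
{
	unsigned long db = mark_left - rs_left;		/* bits resynced since the mark */
	unsigned long dt = dt_seconds ? dt_seconds : 1;	/* avoid division by zero */
	return (db / dt) << 2;				/* KiB/s, 4 KiB per bitmap bit */
}
/* example_dbdt(120000, 20000, 6) == 66664 KiB/s; with c_min_rate = 4000
 * this exceeds the floor, so the resync may be throttled while the
 * backing device looks busy. */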
1900
1901
1902static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
1903{
1904 sector_t sector;
1905 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1906 struct drbd_epoch_entry *e;
1907 struct digest_info *di = NULL;
1908	int size, verb;
1909	unsigned int fault_type;
1910	struct p_block_req *p = &mdev->data.rbuf.block_req;
1911
1912 sector = be64_to_cpu(p->sector);
1913 size = be32_to_cpu(p->blksize);
1914
1915 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
1916 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1917 (unsigned long long)sector, size);
1918 return FALSE;
1919 }
1920 if (sector + (size>>9) > capacity) {
1921 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1922 (unsigned long long)sector, size);
1923 return FALSE;
1924 }
1925
1926 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
1927 verb = 1;
1928 switch (cmd) {
1929 case P_DATA_REQUEST:
1930 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
1931 break;
1932 case P_RS_DATA_REQUEST:
1933 case P_CSUM_RS_REQUEST:
1934 case P_OV_REQUEST:
1935 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
1936 break;
1937 case P_OV_REPLY:
1938 verb = 0;
1939 dec_rs_pending(mdev);
1940 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
1941 break;
1942 default:
1943 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
1944 cmdname(cmd));
1945 }
1946 if (verb && __ratelimit(&drbd_ratelimit_state))
1947 dev_err(DEV, "Can not satisfy peer's read request, "
1948 "no local data.\n");
1949
1950		/* drain possible payload */
1951 return drbd_drain_block(mdev, digest_size);
1952 }
1953
1954 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1955 * "criss-cross" setup, that might cause write-out on some other DRBD,
1956 * which in turn might block on the other node at this very place. */
1957 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1958 if (!e) {
1959 put_ldev(mdev);
1960 return FALSE;
1961 }
1962
1963	switch (cmd) {
1964 case P_DATA_REQUEST:
1965 e->w.cb = w_e_end_data_req;
1966 fault_type = DRBD_FAULT_DT_RD;
1967 /* application IO, don't drbd_rs_begin_io */
1968 goto submit;
1969
1970 case P_RS_DATA_REQUEST:
1971 e->w.cb = w_e_end_rsdata_req;
1972 fault_type = DRBD_FAULT_RS_RD;
1973 break;
1974
1975 case P_OV_REPLY:
1976 case P_CSUM_RS_REQUEST:
1977 fault_type = DRBD_FAULT_RS_RD;
1978 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
1979 if (!di)
1980 goto out_free_e;
1981
1982 di->digest_size = digest_size;
1983 di->digest = (((char *)di)+sizeof(struct digest_info));
1984
1985 e->digest = di;
1986 e->flags |= EE_HAS_DIGEST;
1987
1988 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
1989 goto out_free_e;
1990
1991		if (cmd == P_CSUM_RS_REQUEST) {
1992 D_ASSERT(mdev->agreed_pro_version >= 89);
1993 e->w.cb = w_e_end_csum_rs_req;
1994		} else if (cmd == P_OV_REPLY) {
1995 e->w.cb = w_e_end_ov_reply;
1996 dec_rs_pending(mdev);
1997 /* drbd_rs_begin_io done when we sent this request,
1998 * but accounting still needs to be done. */
1999 goto submit_for_resync;
2000 }
2001 break;
2002
2003 case P_OV_REQUEST:
2004 if (mdev->ov_start_sector == ~(sector_t)0 &&
2005 mdev->agreed_pro_version >= 90) {
2006 unsigned long now = jiffies;
2007 int i;
2008 mdev->ov_start_sector = sector;
2009 mdev->ov_position = sector;
2010 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2011 mdev->rs_total = mdev->ov_left;
2012 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2013 mdev->rs_mark_left[i] = mdev->ov_left;
2014 mdev->rs_mark_time[i] = now;
2015 }
2016 dev_info(DEV, "Online Verify start sector: %llu\n",
2017 (unsigned long long)sector);
2018 }
2019 e->w.cb = w_e_end_ov_req;
2020 fault_type = DRBD_FAULT_RS_RD;
2021 break;
2022
2023 default:
2024 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2025			cmdname(cmd));
2026		fault_type = DRBD_FAULT_MAX;
2027		goto out_free_e;
2028 }
2029
2030 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2031 * wrt the receiver, but it is not as straightforward as it may seem.
2032 * Various places in the resync start and stop logic assume resync
2033 * requests are processed in order, requeuing this on the worker thread
2034 * introduces a bunch of new code for synchronization between threads.
2035 *
2036 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2037 * "forever", throttling after drbd_rs_begin_io will lock that extent
2038 * for application writes for the same time. For now, just throttle
2039 * here, where the rest of the code expects the receiver to sleep for
2040 * a while, anyways.
2041 */
2042
2043 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2044	 * this defers syncer requests for some time, before letting at least
2045	 * one request through. The resync controller on the receiving side
2046 * will adapt to the incoming rate accordingly.
2047 *
2048 * We cannot throttle here if remote is Primary/SyncTarget:
2049 * we would also throttle its application reads.
2050 * In that case, throttling is done on the SyncTarget only.
2051 */
2052 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2053 msleep(100);
2054 if (drbd_rs_begin_io(mdev, e->sector))
2055 goto out_free_e;
2056
2057submit_for_resync:
2058 atomic_add(size >> 9, &mdev->rs_sect_ev);
2059
2060submit:
2061	inc_unacked(mdev);
2062 spin_lock_irq(&mdev->req_lock);
2063 list_add_tail(&e->w.list, &mdev->read_ee);
2064 spin_unlock_irq(&mdev->req_lock);
2065
2066 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2067 return TRUE;
2068
2069 /* drbd_submit_ee currently fails for one reason only:
2070 * not being able to allocate enough bios.
2071 * Is dropping the connection going to help? */
2072 spin_lock_irq(&mdev->req_lock);
2073 list_del(&e->w.list);
2074 spin_unlock_irq(&mdev->req_lock);
2075 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2076
2077out_free_e:
2078 put_ldev(mdev);
2079 drbd_free_ee(mdev, e);
2080 return FALSE;
2081}
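
/* Editorial note, not part of drbd_receiver.c, hypothetical values: the
 * sanity checks at the top of receive_DataRequest() accept size = 4096
 * (positive, (4096 & 0x1ff) == 0, below DRBD_MAX_SEGMENT_SIZE) and
 * reject size = 4100 (not 512-byte aligned). A request at
 * sector = capacity - 4 with size = 4096 (8 sectors) fails the capacity
 * check, since capacity - 4 + 8 > capacity. */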
2082
2083static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2084{
2085 int self, peer, rv = -100;
2086 unsigned long ch_self, ch_peer;
2087
2088 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2089 peer = mdev->p_uuid[UI_BITMAP] & 1;
2090
2091 ch_peer = mdev->p_uuid[UI_SIZE];
2092 ch_self = mdev->comm_bm_set;
2093
2094 switch (mdev->net_conf->after_sb_0p) {
2095 case ASB_CONSENSUS:
2096 case ASB_DISCARD_SECONDARY:
2097 case ASB_CALL_HELPER:
2098 dev_err(DEV, "Configuration error.\n");
2099 break;
2100 case ASB_DISCONNECT:
2101 break;
2102 case ASB_DISCARD_YOUNGER_PRI:
2103 if (self == 0 && peer == 1) {
2104 rv = -1;
2105 break;
2106 }
2107 if (self == 1 && peer == 0) {
2108 rv = 1;
2109 break;
2110 }
2111 /* Else fall through to one of the other strategies... */
2112 case ASB_DISCARD_OLDER_PRI:
2113 if (self == 0 && peer == 1) {
2114 rv = 1;
2115 break;
2116 }
2117 if (self == 1 && peer == 0) {
2118 rv = -1;
2119 break;
2120 }
2121 /* Else fall through to one of the other strategies... */
2122		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2123 "Using discard-least-changes instead\n");
2124 case ASB_DISCARD_ZERO_CHG:
2125 if (ch_peer == 0 && ch_self == 0) {
2126 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2127 ? -1 : 1;
2128 break;
2129 } else {
2130 if (ch_peer == 0) { rv = 1; break; }
2131 if (ch_self == 0) { rv = -1; break; }
2132 }
2133 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2134 break;
2135 case ASB_DISCARD_LEAST_CHG:
2136 if (ch_self < ch_peer)
2137 rv = -1;
2138 else if (ch_self > ch_peer)
2139 rv = 1;
2140 else /* ( ch_self == ch_peer ) */
2141 /* Well, then use something else. */
2142 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2143 ? -1 : 1;
2144 break;
2145 case ASB_DISCARD_LOCAL:
2146 rv = -1;
2147 break;
2148 case ASB_DISCARD_REMOTE:
2149 rv = 1;
2150 }
2151
2152 return rv;
2153}
2154
2155static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2156{
2157 int self, peer, hg, rv = -100;
2158
2159 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2160 peer = mdev->p_uuid[UI_BITMAP] & 1;
2161
2162 switch (mdev->net_conf->after_sb_1p) {
2163 case ASB_DISCARD_YOUNGER_PRI:
2164 case ASB_DISCARD_OLDER_PRI:
2165 case ASB_DISCARD_LEAST_CHG:
2166 case ASB_DISCARD_LOCAL:
2167 case ASB_DISCARD_REMOTE:
2168 dev_err(DEV, "Configuration error.\n");
2169 break;
2170 case ASB_DISCONNECT:
2171 break;
2172 case ASB_CONSENSUS:
2173 hg = drbd_asb_recover_0p(mdev);
2174 if (hg == -1 && mdev->state.role == R_SECONDARY)
2175 rv = hg;
2176 if (hg == 1 && mdev->state.role == R_PRIMARY)
2177 rv = hg;
2178 break;
2179 case ASB_VIOLENTLY:
2180 rv = drbd_asb_recover_0p(mdev);
2181 break;
2182 case ASB_DISCARD_SECONDARY:
2183 return mdev->state.role == R_PRIMARY ? 1 : -1;
2184 case ASB_CALL_HELPER:
2185 hg = drbd_asb_recover_0p(mdev);
2186 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2187 self = drbd_set_role(mdev, R_SECONDARY, 0);
2188 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2189 * we might be here in C_WF_REPORT_PARAMS which is transient.
2190 * we do not need to wait for the after state change work either. */
2191 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2192 if (self != SS_SUCCESS) {
2193 drbd_khelper(mdev, "pri-lost-after-sb");
2194 } else {
2195 dev_warn(DEV, "Successfully gave up primary role.\n");
2196 rv = hg;
2197 }
2198 } else
2199 rv = hg;
2200 }
2201
2202 return rv;
2203}
2204
2205static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2206{
2207 int self, peer, hg, rv = -100;
2208
2209 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2210 peer = mdev->p_uuid[UI_BITMAP] & 1;
2211
2212 switch (mdev->net_conf->after_sb_2p) {
2213 case ASB_DISCARD_YOUNGER_PRI:
2214 case ASB_DISCARD_OLDER_PRI:
2215 case ASB_DISCARD_LEAST_CHG:
2216 case ASB_DISCARD_LOCAL:
2217 case ASB_DISCARD_REMOTE:
2218 case ASB_CONSENSUS:
2219 case ASB_DISCARD_SECONDARY:
2220 dev_err(DEV, "Configuration error.\n");
2221 break;
2222 case ASB_VIOLENTLY:
2223 rv = drbd_asb_recover_0p(mdev);
2224 break;
2225 case ASB_DISCONNECT:
2226 break;
2227 case ASB_CALL_HELPER:
2228 hg = drbd_asb_recover_0p(mdev);
2229 if (hg == -1) {
2230 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2231 * we might be here in C_WF_REPORT_PARAMS which is transient.
2232 * we do not need to wait for the after state change work either. */
2233 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2234 if (self != SS_SUCCESS) {
2235 drbd_khelper(mdev, "pri-lost-after-sb");
2236 } else {
2237 dev_warn(DEV, "Successfully gave up primary role.\n");
2238 rv = hg;
2239 }
2240 } else
2241 rv = hg;
2242 }
2243
2244 return rv;
2245}
2246
2247static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2248 u64 bits, u64 flags)
2249{
2250 if (!uuid) {
2251 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2252 return;
2253 }
2254 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2255 text,
2256 (unsigned long long)uuid[UI_CURRENT],
2257 (unsigned long long)uuid[UI_BITMAP],
2258 (unsigned long long)uuid[UI_HISTORY_START],
2259 (unsigned long long)uuid[UI_HISTORY_END],
2260 (unsigned long long)bits,
2261 (unsigned long long)flags);
2262}
2263
2264/*
2265 100 after split brain try auto recover
2266 2 C_SYNC_SOURCE set BitMap
2267 1 C_SYNC_SOURCE use BitMap
2268 0 no Sync
2269 -1 C_SYNC_TARGET use BitMap
2270 -2 C_SYNC_TARGET set BitMap
2271 -100 after split brain, disconnect
2272-1000 unrelated data
2273 */
2274static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2275{
2276 u64 self, peer;
2277 int i, j;
2278
2279 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2280 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2281
2282 *rule_nr = 10;
2283 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2284 return 0;
2285
2286 *rule_nr = 20;
2287 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2288 peer != UUID_JUST_CREATED)
2289 return -2;
2290
2291 *rule_nr = 30;
2292 if (self != UUID_JUST_CREATED &&
2293 (peer == UUID_JUST_CREATED || peer == (u64)0))
2294 return 2;
2295
2296 if (self == peer) {
2297 int rct, dc; /* roles at crash time */
2298
2299 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2300
2301 if (mdev->agreed_pro_version < 91)
2302 return -1001;
2303
2304 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2305 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2306 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2307 drbd_uuid_set_bm(mdev, 0UL);
2308
2309 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2310 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2311 *rule_nr = 34;
2312 } else {
2313 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2314 *rule_nr = 36;
2315 }
2316
2317 return 1;
2318 }
2319
2320 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2321
2322 if (mdev->agreed_pro_version < 91)
2323 return -1001;
2324
2325 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2326 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2327 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2328
2329 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2330 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2331 mdev->p_uuid[UI_BITMAP] = 0UL;
2332
2333 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2334 *rule_nr = 35;
2335 } else {
2336 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2337 *rule_nr = 37;
2338 }
2339
2340 return -1;
2341 }
2342
2343 /* Common power [off|failure] */
2344 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2345 (mdev->p_uuid[UI_FLAGS] & 2);
2346 /* lowest bit is set when we were primary,
2347 * next bit (weight 2) is set when peer was primary */
2348 *rule_nr = 40;
2349
2350 switch (rct) {
2351 case 0: /* !self_pri && !peer_pri */ return 0;
2352 case 1: /* self_pri && !peer_pri */ return 1;
2353 case 2: /* !self_pri && peer_pri */ return -1;
2354 case 3: /* self_pri && peer_pri */
2355 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2356 return dc ? -1 : 1;
2357 }
2358 }
2359
2360 *rule_nr = 50;
2361 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2362 if (self == peer)
2363 return -1;
2364
2365 *rule_nr = 51;
2366 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2367 if (self == peer) {
2368 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2369 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2370 if (self == peer) {
2371			/* The last P_SYNC_UUID did not get through. Undo the last start of
2372 resync as sync source modifications of the peer's UUIDs. */
2373
2374 if (mdev->agreed_pro_version < 91)
2375 return -1001;
2376
2377 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2378 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2379 return -1;
2380 }
2381 }
2382
2383 *rule_nr = 60;
2384 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2385 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2386 peer = mdev->p_uuid[i] & ~((u64)1);
2387 if (self == peer)
2388 return -2;
2389 }
2390
2391 *rule_nr = 70;
2392 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2393 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2394 if (self == peer)
2395 return 1;
2396
2397 *rule_nr = 71;
2398 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2399 if (self == peer) {
2400 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2401 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2402 if (self == peer) {
2403			/* The last P_SYNC_UUID did not get through. Undo the last start of
2404 resync as sync source modifications of our UUIDs. */
2405
2406 if (mdev->agreed_pro_version < 91)
2407 return -1001;
2408
2409 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2410 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2411
2412 dev_info(DEV, "Undid last start of resync:\n");
2413
2414 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2415 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2416
2417 return 1;
2418 }
2419 }
2420
2421
2422 *rule_nr = 80;
2423	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2424 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2425 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2426 if (self == peer)
2427 return 2;
2428 }
2429
2430 *rule_nr = 90;
2431 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2432 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2433 if (self == peer && self != ((u64)0))
2434 return 100;
2435
2436 *rule_nr = 100;
2437 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2438 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2439 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2440 peer = mdev->p_uuid[j] & ~((u64)1);
2441 if (self == peer)
2442 return -100;
2443 }
2444 }
2445
2446 return -1000;
2447}
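
/* Editorial sketch, not part of drbd_receiver.c, hypothetical UUIDs:
 * if our UI_CURRENT equals the peer's UI_BITMAP (rule 50), the peer was
 * SyncSource against us before the connection was lost, so we return -1
 * and become C_SYNC_TARGET reusing the peer's bitmap. Symmetrically,
 * our UI_BITMAP matching the peer's UI_CURRENT (rule 70) returns 1 and
 * we become C_SYNC_SOURCE with our own bitmap. A match found only in
 * the history arrays (rules 60/80) returns -2/2 and forces a full sync. */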
2448
2449/* drbd_sync_handshake() returns the new conn state on success, or
2450 CONN_MASK (-1) on failure.
2451 */
2452static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2453 enum drbd_disk_state peer_disk) __must_hold(local)
2454{
2455 int hg, rule_nr;
2456 enum drbd_conns rv = C_MASK;
2457 enum drbd_disk_state mydisk;
2458
2459 mydisk = mdev->state.disk;
2460 if (mydisk == D_NEGOTIATING)
2461 mydisk = mdev->new_state_tmp.disk;
2462
2463 dev_info(DEV, "drbd_sync_handshake:\n");
2464 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2465 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2466 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2467
2468 hg = drbd_uuid_compare(mdev, &rule_nr);
2469
2470 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2471
2472 if (hg == -1000) {
2473 dev_alert(DEV, "Unrelated data, aborting!\n");
2474 return C_MASK;
2475 }
2476 if (hg == -1001) {
2477		dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
2478 return C_MASK;
2479 }
2480
2481 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2482 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2483 int f = (hg == -100) || abs(hg) == 2;
2484 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2485 if (f)
2486 hg = hg*2;
2487 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2488 hg > 0 ? "source" : "target");
2489 }
2490
2491 if (abs(hg) == 100)
2492 drbd_khelper(mdev, "initial-split-brain");
2493
2494 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2495 int pcount = (mdev->state.role == R_PRIMARY)
2496 + (peer_role == R_PRIMARY);
2497 int forced = (hg == -100);
2498
2499 switch (pcount) {
2500 case 0:
2501 hg = drbd_asb_recover_0p(mdev);
2502 break;
2503 case 1:
2504 hg = drbd_asb_recover_1p(mdev);
2505 break;
2506 case 2:
2507 hg = drbd_asb_recover_2p(mdev);
2508 break;
2509 }
2510 if (abs(hg) < 100) {
2511 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2512 "automatically solved. Sync from %s node\n",
2513 pcount, (hg < 0) ? "peer" : "this");
2514 if (forced) {
2515 dev_warn(DEV, "Doing a full sync, since"
2516 " UUIDs where ambiguous.\n");
2517 hg = hg*2;
2518 }
2519 }
2520 }
2521
2522 if (hg == -100) {
2523 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2524 hg = -1;
2525 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2526 hg = 1;
2527
2528 if (abs(hg) < 100)
2529 dev_warn(DEV, "Split-Brain detected, manually solved. "
2530 "Sync from %s node\n",
2531 (hg < 0) ? "peer" : "this");
2532 }
2533
2534 if (hg == -100) {
2535 /* FIXME this log message is not correct if we end up here
2536 * after an attempted attach on a diskless node.
2537 * We just refuse to attach -- well, we drop the "connection"
2538 * to that disk, in a way... */
2539		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2540 drbd_khelper(mdev, "split-brain");
2541 return C_MASK;
2542 }
2543
2544 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2545 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2546 return C_MASK;
2547 }
2548
2549 if (hg < 0 && /* by intention we do not use mydisk here. */
2550 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2551 switch (mdev->net_conf->rr_conflict) {
2552 case ASB_CALL_HELPER:
2553 drbd_khelper(mdev, "pri-lost");
2554 /* fall through */
2555 case ASB_DISCONNECT:
2556 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2557 return C_MASK;
2558 case ASB_VIOLENTLY:
2559 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2560 "assumption\n");
2561 }
2562 }
2563
2564 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2565 if (hg == 0)
2566 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2567 else
2568 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2569 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2570 abs(hg) >= 2 ? "full" : "bit-map based");
2571 return C_MASK;
2572 }
2573
2574 if (abs(hg) >= 2) {
2575 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2576 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2577 return C_MASK;
2578 }
2579
2580 if (hg > 0) { /* become sync source. */
2581 rv = C_WF_BITMAP_S;
2582 } else if (hg < 0) { /* become sync target */
2583 rv = C_WF_BITMAP_T;
2584 } else {
2585 rv = C_CONNECTED;
2586 if (drbd_bm_total_weight(mdev)) {
2587 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2588 drbd_bm_total_weight(mdev));
2589 }
2590 }
2591
2592 return rv;
2593}
2594
2595/* returns 1 if invalid */
2596static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2597{
2598 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2599 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2600 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2601 return 0;
2602
2603 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2604 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2605 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2606 return 1;
2607
2608 /* everything else is valid if they are equal on both sides. */
2609 if (peer == self)
2610 return 0;
2611
2612	/* everything else is invalid. */
2613 return 1;
2614}
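
/* Editorial example, not part of drbd_receiver.c: a few combinations as
 * cmp_after_sb() judges them.
 *
 *   peer = ASB_DISCARD_REMOTE, self = ASB_DISCARD_LOCAL  -> 0 (both
 *          settings sacrifice the same node's data: valid)
 *   peer = ASB_DISCARD_LOCAL,  self = ASB_DISCARD_LOCAL  -> 1 (each side
 *          would sacrifice its own data: invalid)
 *   peer = ASB_DISCONNECT,     self = ASB_DISCONNECT     -> 0 (equal)
 *   peer = ASB_DISCONNECT,     self = ASB_CONSENSUS      -> 1 (unequal)
 */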
2615
2616static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2617{
2618	struct p_protocol *p = &mdev->data.rbuf.protocol;
2619	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2620	int p_want_lose, p_two_primaries, cf;
2621 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2622
2623 p_proto = be32_to_cpu(p->protocol);
2624 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2625 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2626 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2627	p_two_primaries = be32_to_cpu(p->two_primaries);
2628 cf = be32_to_cpu(p->conn_flags);
2629 p_want_lose = cf & CF_WANT_LOSE;
2630
2631 clear_bit(CONN_DRY_RUN, &mdev->flags);
2632
2633 if (cf & CF_DRY_RUN)
2634 set_bit(CONN_DRY_RUN, &mdev->flags);
2635
2636 if (p_proto != mdev->net_conf->wire_protocol) {
2637 dev_err(DEV, "incompatible communication protocols\n");
2638 goto disconnect;
2639 }
2640
2641 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2642 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2643 goto disconnect;
2644 }
2645
2646 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2647 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2648 goto disconnect;
2649 }
2650
2651 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2652 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2653 goto disconnect;
2654 }
2655
2656 if (p_want_lose && mdev->net_conf->want_lose) {
2657 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2658 goto disconnect;
2659 }
2660
2661 if (p_two_primaries != mdev->net_conf->two_primaries) {
2662 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2663 goto disconnect;
2664 }
2665
2666 if (mdev->agreed_pro_version >= 87) {
2667 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2668
2669 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2670 return FALSE;
2671
2672 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2673 if (strcmp(p_integrity_alg, my_alg)) {
2674 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2675 goto disconnect;
2676 }
2677 dev_info(DEV, "data-integrity-alg: %s\n",
2678 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2679 }
2680
2681 return TRUE;
2682
2683disconnect:
2684 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2685 return FALSE;
2686}
2687
2688/* helper function
2689 * input: alg name, feature name
2690 * return: NULL (alg name was "")
2691 * ERR_PTR(error) if something goes wrong
2692 * or the crypto hash ptr, if it worked out ok. */
2693struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2694 const char *alg, const char *name)
2695{
2696 struct crypto_hash *tfm;
2697
2698 if (!alg[0])
2699 return NULL;
2700
2701 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2702 if (IS_ERR(tfm)) {
2703 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2704 alg, name, PTR_ERR(tfm));
2705 return tfm;
2706 }
2707 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2708 crypto_free_hash(tfm);
2709 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2710 return ERR_PTR(-EINVAL);
2711 }
2712 return tfm;
2713}
2714
2715static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2716{
2717 int ok = TRUE;
2718	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2719 unsigned int header_size, data_size, exp_max_sz;
2720 struct crypto_hash *verify_tfm = NULL;
2721 struct crypto_hash *csums_tfm = NULL;
2722 const int apv = mdev->agreed_pro_version;
2723 int *rs_plan_s = NULL;
2724 int fifo_size = 0;
2725
2726 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2727 : apv == 88 ? sizeof(struct p_rs_param)
2728 + SHARED_SECRET_MAX
2729 : apv <= 94 ? sizeof(struct p_rs_param_89)
2730 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2731
2732	if (packet_size > exp_max_sz) {
2733		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2734		    packet_size, exp_max_sz);
2735 return FALSE;
2736 }
2737
2738 if (apv <= 88) {
2739 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2740 data_size = packet_size - header_size;
2741	} else if (apv <= 94) {
2742 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2743 data_size = packet_size - header_size;
2744		D_ASSERT(data_size == 0);
2745	} else {
2746 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2747 data_size = packet_size - header_size;
2748 D_ASSERT(data_size == 0);
2749 }
2750
2751 /* initialize verify_alg and csums_alg */
2752 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2753
2754	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2755 return FALSE;
2756
2757 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2758
2759 if (apv >= 88) {
2760 if (apv == 88) {
2761 if (data_size > SHARED_SECRET_MAX) {
2762 dev_err(DEV, "verify-alg too long, "
2763 "peer wants %u, accepting only %u byte\n",
2764 data_size, SHARED_SECRET_MAX);
2765 return FALSE;
2766 }
2767
2768 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2769 return FALSE;
2770
2771 /* we expect NUL terminated string */
2772 /* but just in case someone tries to be evil */
2773 D_ASSERT(p->verify_alg[data_size-1] == 0);
2774 p->verify_alg[data_size-1] = 0;
2775
2776 } else /* apv >= 89 */ {
2777 /* we still expect NUL terminated strings */
2778 /* but just in case someone tries to be evil */
2779 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2780 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2781 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2782 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2783 }
2784
2785 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2786 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2787 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2788 mdev->sync_conf.verify_alg, p->verify_alg);
2789 goto disconnect;
2790 }
2791 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2792 p->verify_alg, "verify-alg");
2793 if (IS_ERR(verify_tfm)) {
2794 verify_tfm = NULL;
2795 goto disconnect;
2796 }
2797 }
2798
2799 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2800 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2801 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2802 mdev->sync_conf.csums_alg, p->csums_alg);
2803 goto disconnect;
2804 }
2805 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2806 p->csums_alg, "csums-alg");
2807 if (IS_ERR(csums_tfm)) {
2808 csums_tfm = NULL;
2809 goto disconnect;
2810 }
2811 }
2812
2813 if (apv > 94) {
2814 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2815 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2816 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2817 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2818 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2819
2820 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2821 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2822 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2823 if (!rs_plan_s) {
2824					dev_err(DEV, "kzalloc of fifo_buffer failed");
2825 goto disconnect;
2826 }
2827 }
2828		}
2829
2830 spin_lock(&mdev->peer_seq_lock);
2831 /* lock against drbd_nl_syncer_conf() */
2832 if (verify_tfm) {
2833 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2834 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2835 crypto_free_hash(mdev->verify_tfm);
2836 mdev->verify_tfm = verify_tfm;
2837 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2838 }
2839 if (csums_tfm) {
2840 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2841 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2842 crypto_free_hash(mdev->csums_tfm);
2843 mdev->csums_tfm = csums_tfm;
2844 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2845 }
2846 if (fifo_size != mdev->rs_plan_s.size) {
2847 kfree(mdev->rs_plan_s.values);
2848 mdev->rs_plan_s.values = rs_plan_s;
2849 mdev->rs_plan_s.size = fifo_size;
2850 mdev->rs_planed = 0;
2851 }
2852 spin_unlock(&mdev->peer_seq_lock);
2853 }
2854
2855 return ok;
2856disconnect:
2857 /* just for completeness: actually not needed,
2858 * as this is not reached if csums_tfm was ok. */
2859 crypto_free_hash(csums_tfm);
2860 /* but free the verify_tfm again, if csums_tfm did not work out */
2861 crypto_free_hash(verify_tfm);
2862 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2863 return FALSE;
2864}
2865
2866static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2867{
2868 /* sorry, we currently have no working implementation
2869 * of distributed TCQ */
2870}
2871
2872/* warn if the arguments differ by more than 12.5% */
2873static void warn_if_differ_considerably(struct drbd_conf *mdev,
2874 const char *s, sector_t a, sector_t b)
2875{
2876 sector_t d;
2877 if (a == 0 || b == 0)
2878 return;
2879 d = (a > b) ? (a - b) : (b - a);
2880 if (d > (a>>3) || d > (b>>3))
2881 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2882 (unsigned long long)a, (unsigned long long)b);
2883}
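
/* Editorial example, hypothetical sizes: a = 1000 and b = 1150 sectors
 * give d = 150; a>>3 == 125, and d > 125, so the difference is logged.
 * With b = 1100, d = 100 is within both a>>3 == 125 and b>>3 == 137,
 * so nothing is reported. */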
2884
2885static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2886{
2887	struct p_sizes *p = &mdev->data.rbuf.sizes;
2888 enum determine_dev_size dd = unchanged;
2889 unsigned int max_seg_s;
2890 sector_t p_size, p_usize, my_usize;
2891 int ldsc = 0; /* local disk size changed */
2892	enum dds_flags ddsf;
2893
2894 p_size = be64_to_cpu(p->d_size);
2895 p_usize = be64_to_cpu(p->u_size);
2896
2897 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2898 dev_err(DEV, "some backing storage is needed\n");
2899 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2900 return FALSE;
2901 }
2902
2903 /* just store the peer's disk size for now.
2904 * we still need to figure out whether we accept that. */
2905 mdev->p_size = p_size;
2906
2907 if (get_ldev(mdev)) {
2908 warn_if_differ_considerably(mdev, "lower level device sizes",
2909 p_size, drbd_get_max_capacity(mdev->ldev));
2910 warn_if_differ_considerably(mdev, "user requested size",
2911 p_usize, mdev->ldev->dc.disk_size);
2912
2913 /* if this is the first connect, or an otherwise expected
2914 * param exchange, choose the minimum */
2915 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2916 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2917 p_usize);
2918
2919 my_usize = mdev->ldev->dc.disk_size;
2920
2921 if (mdev->ldev->dc.disk_size != p_usize) {
2922 mdev->ldev->dc.disk_size = p_usize;
2923 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2924 (unsigned long)mdev->ldev->dc.disk_size);
2925 }
2926
2927 /* Never shrink a device with usable data during connect.
2928 But allow online shrinking if we are connected. */
2929		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2930 drbd_get_capacity(mdev->this_bdev) &&
2931 mdev->state.disk >= D_OUTDATED &&
2932 mdev->state.conn < C_CONNECTED) {
2933 dev_err(DEV, "The peer's disk size is too small!\n");
2934 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2935 mdev->ldev->dc.disk_size = my_usize;
2936 put_ldev(mdev);
2937 return FALSE;
2938 }
2939 put_ldev(mdev);
2940 }
2941#undef min_not_zero
2942
2943	ddsf = be16_to_cpu(p->dds_flags);
2944	if (get_ldev(mdev)) {
2945		dd = drbd_determin_dev_size(mdev, ddsf);
2946 put_ldev(mdev);
2947 if (dd == dev_size_error)
2948 return FALSE;
2949 drbd_md_sync(mdev);
2950 } else {
2951 /* I am diskless, need to accept the peer's size. */
2952 drbd_set_my_capacity(mdev, p_size);
2953 }
2954
2955 if (get_ldev(mdev)) {
2956 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2957 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2958 ldsc = 1;
2959 }
2960
2961 if (mdev->agreed_pro_version < 94)
2962 max_seg_s = be32_to_cpu(p->max_segment_size);
2963 else if (mdev->agreed_pro_version == 94)
2964 max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
2965 else /* drbd 8.3.8 onwards */
2966 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
2967
2968 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
2969 drbd_setup_queue_param(mdev, max_seg_s);
2970
2971		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
2972 put_ldev(mdev);
2973 }
2974
2975 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
2976 if (be64_to_cpu(p->c_size) !=
2977 drbd_get_capacity(mdev->this_bdev) || ldsc) {
2978 /* we have different sizes, probably peer
2979 * needs to know my new size... */
2980			drbd_send_sizes(mdev, 0, ddsf);
2981 }
2982 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
2983 (dd == grew && mdev->state.conn == C_CONNECTED)) {
2984 if (mdev->state.pdsk >= D_INCONSISTENT &&
2985 mdev->state.disk >= D_INCONSISTENT) {
2986 if (ddsf & DDSF_NO_RESYNC)
2987 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
2988 else
2989 resync_after_online_grow(mdev);
2990 } else
2991 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
2992 }
2993 }
2994
2995 return TRUE;
2996}
2997
2998static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2999{
3000	struct p_uuids *p = &mdev->data.rbuf.uuids;
3001 u64 *p_uuid;
3002 int i;
3003
3004 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3005
3006 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3007 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3008
3009 kfree(mdev->p_uuid);
3010 mdev->p_uuid = p_uuid;
3011
3012 if (mdev->state.conn < C_CONNECTED &&
3013 mdev->state.disk < D_INCONSISTENT &&
3014 mdev->state.role == R_PRIMARY &&
3015 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3016 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3017 (unsigned long long)mdev->ed_uuid);
3018 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3019 return FALSE;
3020 }
3021
3022 if (get_ldev(mdev)) {
3023 int skip_initial_sync =
3024 mdev->state.conn == C_CONNECTED &&
3025 mdev->agreed_pro_version >= 90 &&
3026 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3027 (p_uuid[UI_FLAGS] & 8);
3028 if (skip_initial_sync) {
3029 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3030 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3031 "clear_n_write from receive_uuids");
3032 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3033 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3034 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3035 CS_VERBOSE, NULL);
3036 drbd_md_sync(mdev);
3037 }
3038 put_ldev(mdev);
3039 } else if (mdev->state.disk < D_INCONSISTENT &&
3040 mdev->state.role == R_PRIMARY) {
3041 /* I am a diskless primary, the peer just created a new current UUID
3042 for me. */
3043 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3044 }
3045
3046	/* Before we test for the disk state, we should wait until any
3047 ongoing cluster wide state change is finished. That is important if
3048 we are primary and are detaching from our disk. We need to see the
3049 new disk state... */
3050 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3051 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3052 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3053
3054 return TRUE;
3055}
3056
3057/**
3058 * convert_state() - Converts the peer's view of the cluster state to our point of view
3059 * @ps: The state as seen by the peer.
3060 */
3061static union drbd_state convert_state(union drbd_state ps)
3062{
3063 union drbd_state ms;
3064
3065 static enum drbd_conns c_tab[] = {
3066 [C_CONNECTED] = C_CONNECTED,
3067
3068 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3069 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3070 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3071 [C_VERIFY_S] = C_VERIFY_T,
3072 [C_MASK] = C_MASK,
3073 };
3074
3075 ms.i = ps.i;
3076
3077 ms.conn = c_tab[ps.conn];
3078 ms.peer = ps.role;
3079 ms.role = ps.peer;
3080 ms.pdsk = ps.disk;
3081 ms.disk = ps.pdsk;
3082 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3083
3084 return ms;
3085}
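
/* Editorial sketch, hypothetical values, assuming the union drbd_state
 * bit fields from drbd_int.h: if the peer reports "I am Primary with an
 * UpToDate disk; my peer is Secondary and Inconsistent", i.e.
 *
 *	ps.role = R_PRIMARY;	ps.peer = R_SECONDARY;
 *	ps.disk = D_UP_TO_DATE;	ps.pdsk = D_INCONSISTENT;
 *
 * then ms = convert_state(ps) mirrors it into our point of view:
 *
 *	ms.role == R_SECONDARY && ms.peer == R_PRIMARY
 *	ms.disk == D_INCONSISTENT && ms.pdsk == D_UP_TO_DATE
 */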
3086
3087static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3088{
3089	struct p_req_state *p = &mdev->data.rbuf.req_state;
3090 union drbd_state mask, val;
3091 int rv;
3092
3093 mask.i = be32_to_cpu(p->mask);
3094 val.i = be32_to_cpu(p->val);
3095
3096 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3097 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3098 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3099 return TRUE;
3100 }
3101
3102 mask = convert_state(mask);
3103 val = convert_state(val);
3104
3105 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3106
3107 drbd_send_sr_reply(mdev, rv);
3108 drbd_md_sync(mdev);
3109
3110 return TRUE;
3111}
3112
3113static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3114{
3115	struct p_state *p = &mdev->data.rbuf.state;
3116	union drbd_state os, ns, peer_state;
3117	enum drbd_disk_state real_peer_disk;
3118	enum chg_state_flags cs_flags;
3119 int rv;
3120
3121 peer_state.i = be32_to_cpu(p->state);
3122
3123 real_peer_disk = peer_state.disk;
3124 if (peer_state.disk == D_NEGOTIATING) {
3125 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3126 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3127 }
3128
3129 spin_lock_irq(&mdev->req_lock);
3130 retry:
3131	os = ns = mdev->state;
3132 spin_unlock_irq(&mdev->req_lock);
3133
3134 /* peer says his disk is uptodate, while we think it is inconsistent,
3135 * and this happens while we think we have a sync going on. */
3136 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3137 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3138 /* If we are (becoming) SyncSource, but peer is still in sync
3139 * preparation, ignore its uptodate-ness to avoid flapping, it
3140 * will change to inconsistent once the peer reaches active
3141 * syncing states.
3142 * It may have changed syncer-paused flags, however, so we
3143 * cannot ignore this completely. */
3144 if (peer_state.conn > C_CONNECTED &&
3145 peer_state.conn < C_SYNC_SOURCE)
3146 real_peer_disk = D_INCONSISTENT;
3147
3148 /* if peer_state changes to connected at the same time,
3149 * it explicitly notifies us that it finished resync.
3150 * Maybe we should finish it up, too? */
3151 else if (os.conn >= C_SYNC_SOURCE &&
3152 peer_state.conn == C_CONNECTED) {
3153 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3154 drbd_resync_finished(mdev);
3155 return TRUE;
3156 }
3157 }
3158
3159 /* peer says his disk is inconsistent, while we think it is uptodate,
3160 * and this happens while the peer still thinks we have a sync going on,
3161 * but we think we are already done with the sync.
3162 * We ignore this to avoid flapping pdsk.
3163 * This should not happen, if the peer is a recent version of drbd. */
3164 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3165 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3166 real_peer_disk = D_UP_TO_DATE;
3167
3168 if (ns.conn == C_WF_REPORT_PARAMS)
3169 ns.conn = C_CONNECTED;
3170
3171 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3172 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3173 int cr; /* consider resync */
3174
3175 /* if we established a new connection */
3176		cr  = (os.conn < C_CONNECTED);
3177 /* if we had an established connection
3178 * and one of the nodes newly attaches a disk */
3179		cr |= (os.conn == C_CONNECTED &&
3180		       (peer_state.disk == D_NEGOTIATING ||
3181			os.disk == D_NEGOTIATING));
3182 /* if we have both been inconsistent, and the peer has been
3183 * forced to be UpToDate with --overwrite-data */
3184 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3185 /* if we had been plain connected, and the admin requested to
3186 * start a sync by "invalidate" or "invalidate-remote" */
3187		cr |= (os.conn == C_CONNECTED &&
3188 (peer_state.conn >= C_STARTING_SYNC_S &&
3189 peer_state.conn <= C_WF_BITMAP_T));
3190
3191 if (cr)
3192			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3193
3194 put_ldev(mdev);
3195 if (ns.conn == C_MASK) {
3196 ns.conn = C_CONNECTED;
3197			if (mdev->state.disk == D_NEGOTIATING) {
3198				drbd_force_state(mdev, NS(disk, D_FAILED));
3199 } else if (peer_state.disk == D_NEGOTIATING) {
3200 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3201 peer_state.disk = D_DISKLESS;
3202				real_peer_disk = D_DISKLESS;
3203			} else {
3204 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3205 return FALSE;
3206				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3207 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3208 return FALSE;
3209 }
3210 }
3211 }
3212
3213 spin_lock_irq(&mdev->req_lock);
3214	if (mdev->state.i != os.i)
3215 goto retry;
3216 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3217 ns.peer = peer_state.role;
3218 ns.pdsk = real_peer_disk;
3219 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3220	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3221		ns.disk = mdev->new_state_tmp.disk;
3222 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3223 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3224 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3225 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3226		   for temporary network outages! */
3227 spin_unlock_irq(&mdev->req_lock);
3228 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3229 tl_clear(mdev);
3230 drbd_uuid_new_current(mdev);
3231 clear_bit(NEW_CUR_UUID, &mdev->flags);
3232 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3233 return FALSE;
3234 }
3235	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3236 ns = mdev->state;
3237 spin_unlock_irq(&mdev->req_lock);
3238
3239 if (rv < SS_SUCCESS) {
3240 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3241 return FALSE;
3242 }
3243
3244 if (os.conn > C_WF_REPORT_PARAMS) {
3245 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3246 peer_state.disk != D_NEGOTIATING ) {
3247 /* we want resync, peer has not yet decided to sync... */
3248 /* Nowadays only used when forcing a node into primary role and
3249 setting its disk to UpToDate with that */
3250 drbd_send_uuids(mdev);
3251 drbd_send_state(mdev);
3252 }
3253 }
3254
3255 mdev->net_conf->want_lose = 0;
3256
3257 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3258
3259 return TRUE;
3260}
3261
3262static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3263{
3264	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3265
3266 wait_event(mdev->misc_wait,
3267 mdev->state.conn == C_WF_SYNC_UUID ||
3268 mdev->state.conn < C_CONNECTED ||
3269 mdev->state.disk < D_NEGOTIATING);
3270
3271 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3272
3273 /* Here the _drbd_uuid_ functions are right, current should
3274 _not_ be rotated into the history */
3275 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3276 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3277 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3278
3279 drbd_start_resync(mdev, C_SYNC_TARGET);
3280
3281 put_ldev(mdev);
3282 } else
3283 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3284
3285 return TRUE;
3286}
3287
3288enum receive_bitmap_ret { OK, DONE, FAILED };
3289
3290static enum receive_bitmap_ret
3291receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3292 unsigned long *buffer, struct bm_xfer_ctx *c)
3293{
3294 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3295 unsigned want = num_words * sizeof(long);
3296
3297 if (want != data_size) {
3298 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3299 return FAILED;
3300 }
3301 if (want == 0)
3302 return DONE;
3303 if (drbd_recv(mdev, buffer, want) != want)
3304 return FAILED;
3305
3306 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3307
3308 c->word_offset += num_words;
3309 c->bit_offset = c->word_offset * BITS_PER_LONG;
3310 if (c->bit_offset > c->bm_bits)
3311 c->bit_offset = c->bm_bits;
3312
3313 return OK;
3314}
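
/* Editorial example, hypothetical transfer: with c->bm_words = 10000
 * and BM_PACKET_WORDS = 4096, the first two plain packets carry 4096
 * longs each (want = 4096 * sizeof(long) bytes), the third carries the
 * remaining 1808, and a final packet with want == 0 signals DONE. */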
3315
3316static enum receive_bitmap_ret
3317recv_bm_rle_bits(struct drbd_conf *mdev,
3318 struct p_compressed_bm *p,
3319 struct bm_xfer_ctx *c)
3320{
3321 struct bitstream bs;
3322 u64 look_ahead;
3323 u64 rl;
3324 u64 tmp;
3325 unsigned long s = c->bit_offset;
3326 unsigned long e;
3327	int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3328 int toggle = DCBP_get_start(p);
3329 int have;
3330 int bits;
3331
3332 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3333
3334 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3335 if (bits < 0)
3336 return FAILED;
3337
3338 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3339 bits = vli_decode_bits(&rl, look_ahead);
3340 if (bits <= 0)
3341 return FAILED;
3342
3343 if (toggle) {
3344 e = s + rl -1;
3345 if (e >= c->bm_bits) {
3346 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3347 return FAILED;
3348 }
3349 _drbd_bm_set_bits(mdev, s, e);
3350 }
3351
3352 if (have < bits) {
3353 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3354 have, bits, look_ahead,
3355 (unsigned int)(bs.cur.b - p->code),
3356 (unsigned int)bs.buf_len);
3357 return FAILED;
3358 }
3359 look_ahead >>= bits;
3360 have -= bits;
3361
3362 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3363 if (bits < 0)
3364 return FAILED;
3365 look_ahead |= tmp << have;
3366 have += bits;
3367 }
3368
3369 c->bit_offset = s;
3370 bm_xfer_ctx_bit_to_word_offset(c);
3371
3372 return (s == c->bm_bits) ? DONE : OK;
3373}
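
/* Editorial example, hypothetical runs, not a real packet: with
 * DCBP_get_start(p) == 0 and decoded run lengths 4, 3, 2, the bits
 * starting at c->bit_offset decode to 0000 111 00. Only the toggled-on
 * run calls _drbd_bm_set_bits(), here for s+4 .. s+6; the zero runs
 * merely advance s. */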
3374
3375static enum receive_bitmap_ret
3376decode_bitmap_c(struct drbd_conf *mdev,
3377 struct p_compressed_bm *p,
3378 struct bm_xfer_ctx *c)
3379{
3380 if (DCBP_get_code(p) == RLE_VLI_Bits)
3381 return recv_bm_rle_bits(mdev, p, c);
3382
3383 /* other variants had been implemented for evaluation,
3384 * but have been dropped as this one turned out to be "best"
3385 * during all our tests. */
3386
3387 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3388 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3389 return FAILED;
3390}
3391
3392void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3393 const char *direction, struct bm_xfer_ctx *c)
3394{
3395 /* what would it take to transfer it "plaintext" */
3396	unsigned plain = sizeof(struct p_header80) *
3397 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3398 + c->bm_words * sizeof(long);
3399 unsigned total = c->bytes[0] + c->bytes[1];
3400 unsigned r;
3401
3402 /* total can not be zero. but just in case: */
3403 if (total == 0)
3404 return;
3405
3406 /* don't report if not compressed */
3407 if (total >= plain)
3408 return;
3409
3410 /* total < plain. check for overflow, still */
3411 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3412 : (1000 * total / plain);
3413
3414 if (r > 1000)
3415 r = 1000;
3416
3417 r = 1000 - r;
3418 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3419 "total %u; compression: %u.%u%%\n",
3420 direction,
3421 c->bytes[1], c->packets[1],
3422 c->bytes[0], c->packets[0],
3423 total, r/10, r % 10);
3424}
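
/* Editorial example for the ratio math above: plain = 100000 bytes,
 * total = 12345 bytes on the wire gives r = 1000 * 12345 / 100000 = 123,
 * then r = 1000 - 123 = 877, printed as "compression: 87.7%". */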
3425
3426/* Since we are processing the bitfield from lower addresses to higher,
3427   it does not matter whether we process it in 32 bit chunks or 64 bit
3428   chunks, as long as it is little endian. (Understand it as a byte stream,
3429   beginning with the lowest byte...) If we used big endian,
3430   we would need to process it from the highest address to the lowest,
3431 in order to be agnostic to the 32 vs 64 bits issue.
3432
3433 returns 0 on failure, 1 if we successfully received it. */
3434static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3435{
3436 struct bm_xfer_ctx c;
3437 void *buffer;
3438 enum receive_bitmap_ret ret;
3439 int ok = FALSE;
3440	struct p_header80 *h = &mdev->data.rbuf.header.h80;
3441
3442 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3443
3444 drbd_bm_lock(mdev, "receive bitmap");
3445
3446 /* maybe we should use some per thread scratch page,
3447 * and allocate that during initial device creation? */
3448 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3449 if (!buffer) {
3450 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3451 goto out;
3452 }
3453
3454 c = (struct bm_xfer_ctx) {
3455 .bm_bits = drbd_bm_bits(mdev),
3456 .bm_words = drbd_bm_words(mdev),
3457 };
3458
3459 do {
3460 if (cmd == P_BITMAP) {
3461 ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3462 } else if (cmd == P_COMPRESSED_BITMAP) {
3463 /* MAYBE: sanity check that we speak proto >= 90,
3464 * and the feature is enabled! */
3465 struct p_compressed_bm *p;
3466
3467			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3468 dev_err(DEV, "ReportCBitmap packet too large\n");
3469 goto out;
3470 }
3471 /* use the page buff */
3472 p = buffer;
3473 memcpy(p, h, sizeof(*h));
3474			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3475				goto out;
3476 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3477 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3478 return FAILED;
3479 }
3480 ret = decode_bitmap_c(mdev, p, &c);
3481 } else {
3482			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3483 goto out;
3484 }
3485
3486 c.packets[cmd == P_BITMAP]++;
3487 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3488
3489 if (ret != OK)
3490 break;
3491
3492		if (!drbd_recv_header(mdev, &cmd, &data_size))
3493 goto out;
3494 } while (ret == OK);
3495 if (ret == FAILED)
3496 goto out;
3497
3498 INFO_bm_xfer_stats(mdev, "receive", &c);
3499
3500 if (mdev->state.conn == C_WF_BITMAP_T) {
3501 ok = !drbd_send_bitmap(mdev);
3502 if (!ok)
3503 goto out;
3504 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3505 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3506 D_ASSERT(ok == SS_SUCCESS);
3507 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3508 /* admin may have requested C_DISCONNECTING,
3509 * other threads may have noticed network errors */
3510 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3511 drbd_conn_str(mdev->state.conn));
3512 }
3513
3514 ok = TRUE;
3515 out:
3516 drbd_bm_unlock(mdev);
3517 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3518 drbd_start_resync(mdev, C_SYNC_SOURCE);
3519 free_page((unsigned long) buffer);
3520 return ok;
3521}
3522
3523static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3524{
3525 /* TODO zero copy sink :) */
3526 static char sink[128];
3527 int size, want, r;
3528
3529 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3530 cmd, data_size);
b411b363 3531
02918be2 3532 size = data_size;
b411b363
PR
3533 while (size > 0) {
3534 want = min_t(int, size, sizeof(sink));
3535 r = drbd_recv(mdev, sink, want);
3536 ERR_IF(r <= 0) break;
3537 size -= r;
3538 }
3539 return size == 0;
3540}
3541
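/* Note on receive_skip() above: the payload of an unknown optional packet
 * is drained through the 128 byte sink buffer in chunks.  E.g. a 1000 byte
 * payload takes ceil(1000/128) = 8 calls to drbd_recv() if every read
 * returns a full chunk; short reads are fine, the loop only advances by
 * what actually arrived. */
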
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return TRUE;
}

typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	    = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = { 0, 0, NULL },
};

/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually the callback can find the usual p_header in
   mdev->data.rbuf.header.head, but it must not rely on that,
   since there is also p_header95.
 */

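/* Illustration of the dispatch arithmetic in drbdd() below (P_BARRIER is
 * just an example; the sizes come from the packet structs):
 *
 *	shs = sizeof(struct p_barrier) - sizeof(union p_header);
 *
 * those shs sub-header bytes are read into header->h80.payload, and
 * receive_Barrier() is then handed the remaining packet_size - shs bytes
 * as its data_size argument. */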
static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}

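/* drbd_flush_workqueue() below implements a flush with a barrier work item:
 * w_prev_work_done is expected to complete the embedded completion, so once
 * wait_for_completion() returns, every work item queued before the barrier
 * has been processed by the worker. */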
void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}

void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}
	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}

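/* Rough teardown order in drbd_disconnect() below: stop the asender and
 * free the sockets, wait for the active/sync/read ee lists to drain, cancel
 * all resync bookkeeping, flush the worker queue, process done_ee, and only
 * then transition to C_UNCONNECTED (or on to C_STANDALONE, when the admin
 * requested the disconnect). */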
static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;
	if (mdev->state.conn >= C_WF_CONNECTION)
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
			drbd_conn_str(mdev->state.conn));

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages. */
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		if (!is_susp(mdev->state)) {
			/* we must not free the tl_hash
			 * while application io is still on the fly */
			wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
			drbd_free_tl_hash(mdev);
		}

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header80 *)p, sizeof(*p), 0);
	mutex_unlock(&mdev->data.mutex);
	return ok;
}

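/* Version negotiation example (numbers purely illustrative): if we were
 * built with PRO_VERSION_MIN = 86 and PRO_VERSION_MAX = 96 and the peer
 * announces 90..101, the ranges overlap and drbd_do_handshake() below picks
 * agreed_pro_version = min(96, 101) = 96.  A peer announcing 80..85 fails
 * the overlap check and we go standalone. */
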
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
			expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

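/* The CRAM-HMAC exchange below runs symmetrically on both peers.
 * Sketch of one side (the other mirrors it):
 *
 *	send P_AUTH_CHALLENGE	(our CHALLENGE_LEN random bytes)
 *	recv P_AUTH_CHALLENGE	(the peer's challenge)
 *	send P_AUTH_RESPONSE	(HMAC(shared_secret, peer's challenge))
 *	recv P_AUTH_RESPONSE	(the peer's HMAC over our challenge)
 *	memcmp() against the locally computed HMAC over my_challenge
 *
 * The shared secret itself never crosses the wire. */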
static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "AuthResponse payload has wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif

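/* drbdd_init() is the receiver thread entry point.  Judging by the loop
 * below, drbd_connect() follows the same convention as drbd_do_handshake():
 * 1 = connected, 0 = transient failure (retry after a second),
 * -1 = fatal mismatch (discard the network configuration). */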
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return TRUE;
}

static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return TRUE;
}

static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return TRUE;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
		(void *)(unsigned long)id, (unsigned long long)sector);
	return NULL;
}

typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
		return FALSE;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return TRUE;
}

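/* got_BlockAck() below maps the ack packet type to a request event (and
 * asserts the wire protocol under which that ack is meaningful):
 *
 *	P_RS_WRITE_ACK -> write_acked_by_peer_and_sis	(protocol C)
 *	P_WRITE_ACK    -> write_acked_by_peer		(protocol C)
 *	P_RECV_ACK     -> recv_acked_by_peer		(protocol B)
 *	P_DISCARD_ACK  -> conflict_discarded_by_peer	(protocol C)
 *
 * Syncer blocks (is_syncer_block_id) are not in the transfer log and are
 * handled directly. */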
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return TRUE;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return FALSE;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__ , what);
}

static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return TRUE;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__ , neg_acked);
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__ , neg_acked);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_rs_failed_io(mdev, sector, size);
		put_ldev(mdev);
	}

	return TRUE;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	return TRUE;
}

static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return TRUE;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return TRUE;
}

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return TRUE;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

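/* Receive loop mechanics in drbd_asender() below: bytes accumulate in the
 * header buffer until a full p_header80 has arrived; the command is then
 * looked up in asender_tbl to learn the full packet size, `expect` is
 * raised accordingly, and once the rest has arrived the ->process()
 * callback runs.  A sketch of one iteration's bookkeeping:
 *
 *	rv = drbd_recv_short(mdev, mdev->meta.socket,
 *			     buf, expect - received, 0);
 *	if (rv > 0) {
 *		received += rv;
 *		buf += rv;
 *	}
 *
 * -EAGAIN means the receive timed out: with the short ping timeout armed,
 * an overdue PingAck tears the connection down; otherwise a new ping is
 * scheduled. */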
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyway. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}