drbd: remove struct drbd_tl_epoch objects (barrier works)
[deliverable/linux.git] / drivers / block / drbd / drbd_worker.c
1 /*
2 drbd_worker.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/sched.h>
29 #include <linux/wait.h>
30 #include <linux/mm.h>
31 #include <linux/memcontrol.h>
32 #include <linux/mm_inline.h>
33 #include <linux/slab.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 #include <linux/scatterlist.h>
37
38 #include "drbd_int.h"
39 #include "drbd_req.h"
40
41 static int w_make_ov_request(struct drbd_work *w, int cancel);
42
43
44 /* endio handlers:
45 * drbd_md_io_complete (defined here)
46 * drbd_request_endio (defined here)
47 * drbd_peer_request_endio (defined here)
48 * bm_async_io_complete (defined in drbd_bitmap.c)
49 *
50 * For all these callbacks, note the following:
51 * The callbacks will be called in irq context by the IDE drivers,
52 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
53 * Try to get the locking right :)
54 *
55 */
56
57
58 /* About the global_state_lock
59 Each state transition on an device holds a read lock. In case we have
60 to evaluate the resync after dependencies, we grab a write lock, because
61 we need stable states on all devices for that. */
62 rwlock_t global_state_lock;
63
64 /* used for synchronous meta data and bitmap IO
65 * submitted by drbd_md_sync_page_io()
66 */
67 void drbd_md_io_complete(struct bio *bio, int error)
68 {
69 struct drbd_md_io *md_io;
70 struct drbd_conf *mdev;
71
72 md_io = (struct drbd_md_io *)bio->bi_private;
73 mdev = container_of(md_io, struct drbd_conf, md_io);
74
75 md_io->error = error;
76
77 /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
78 * to timeout on the lower level device, and eventually detach from it.
79 * If this io completion runs after that timeout expired, this
80 * drbd_md_put_buffer() may allow us to finally try and re-attach.
81 * During normal operation, this only puts that extra reference
82 * down to 1 again.
83 * Make sure we first drop the reference, and only then signal
84 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
85 * next drbd_md_sync_page_io(), that we trigger the
86 * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
87 */
88 drbd_md_put_buffer(mdev);
89 md_io->done = 1;
90 wake_up(&mdev->misc_wait);
91 bio_put(bio);
92 put_ldev(mdev);
93 }
94
95 /* reads on behalf of the partner,
96 * "submitted" by the receiver
97 */
98 void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
99 {
100 unsigned long flags = 0;
101 struct drbd_conf *mdev = peer_req->w.mdev;
102
103 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
104 mdev->read_cnt += peer_req->i.size >> 9;
105 list_del(&peer_req->w.list);
106 if (list_empty(&mdev->read_ee))
107 wake_up(&mdev->ee_wait);
108 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
109 __drbd_chk_io_error(mdev, false);
110 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
111
112 drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
113 put_ldev(mdev);
114 }
115
116 /* writes on behalf of the partner, or resync writes,
117 * "submitted" by the receiver, final stage. */
118 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
119 {
120 unsigned long flags = 0;
121 struct drbd_conf *mdev = peer_req->w.mdev;
122 struct drbd_interval i;
123 int do_wake;
124 u64 block_id;
125 int do_al_complete_io;
126
127 /* after we moved peer_req to done_ee,
128 * we may no longer access it,
129 * it may be freed/reused already!
130 * (as soon as we release the req_lock) */
131 i = peer_req->i;
132 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
133 block_id = peer_req->block_id;
134
135 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
136 mdev->writ_cnt += peer_req->i.size >> 9;
137 list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
138 list_add_tail(&peer_req->w.list, &mdev->done_ee);
139
140 /*
141 * Do not remove from the write_requests tree here: we did not send the
142 * Ack yet and did not wake possibly waiting conflicting requests.
143 * Removed from the tree from "drbd_process_done_ee" within the
144 * appropriate w.cb (e_end_block/e_end_resync_block) or from
145 * _drbd_clear_done_ee.
146 */
147
148 do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
149
150 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
151 __drbd_chk_io_error(mdev, false);
152 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
153
154 if (block_id == ID_SYNCER)
155 drbd_rs_complete_io(mdev, i.sector);
156
157 if (do_wake)
158 wake_up(&mdev->ee_wait);
159
160 if (do_al_complete_io)
161 drbd_al_complete_io(mdev, &i);
162
163 wake_asender(mdev->tconn);
164 put_ldev(mdev);
165 }
166
167 /* writes on behalf of the partner, or resync writes,
168 * "submitted" by the receiver.
169 */
170 void drbd_peer_request_endio(struct bio *bio, int error)
171 {
172 struct drbd_peer_request *peer_req = bio->bi_private;
173 struct drbd_conf *mdev = peer_req->w.mdev;
174 int uptodate = bio_flagged(bio, BIO_UPTODATE);
175 int is_write = bio_data_dir(bio) == WRITE;
176
177 if (error && __ratelimit(&drbd_ratelimit_state))
178 dev_warn(DEV, "%s: error=%d s=%llus\n",
179 is_write ? "write" : "read", error,
180 (unsigned long long)peer_req->i.sector);
181 if (!error && !uptodate) {
182 if (__ratelimit(&drbd_ratelimit_state))
183 dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
184 is_write ? "write" : "read",
185 (unsigned long long)peer_req->i.sector);
186 /* strange behavior of some lower level drivers...
187 * fail the request by clearing the uptodate flag,
188 * but do not return any error?! */
189 error = -EIO;
190 }
191
192 if (error)
193 set_bit(__EE_WAS_ERROR, &peer_req->flags);
194
195 bio_put(bio); /* no need for the bio anymore */
196 if (atomic_dec_and_test(&peer_req->pending_bios)) {
197 if (is_write)
198 drbd_endio_write_sec_final(peer_req);
199 else
200 drbd_endio_read_sec_final(peer_req);
201 }
202 }
203
204 /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
205 */
206 void drbd_request_endio(struct bio *bio, int error)
207 {
208 unsigned long flags;
209 struct drbd_request *req = bio->bi_private;
210 struct drbd_conf *mdev = req->w.mdev;
211 struct bio_and_error m;
212 enum drbd_req_event what;
213 int uptodate = bio_flagged(bio, BIO_UPTODATE);
214
215 if (!error && !uptodate) {
216 dev_warn(DEV, "p %s: setting error to -EIO\n",
217 bio_data_dir(bio) == WRITE ? "write" : "read");
218 /* strange behavior of some lower level drivers...
219 * fail the request by clearing the uptodate flag,
220 * but do not return any error?! */
221 error = -EIO;
222 }
223
224 /* to avoid recursion in __req_mod */
225 if (unlikely(error)) {
226 what = (bio_data_dir(bio) == WRITE)
227 ? WRITE_COMPLETED_WITH_ERROR
228 : (bio_rw(bio) == READ)
229 ? READ_COMPLETED_WITH_ERROR
230 : READ_AHEAD_COMPLETED_WITH_ERROR;
231 } else
232 what = COMPLETED_OK;
233
234 bio_put(req->private_bio);
235 req->private_bio = ERR_PTR(error);
236
237 /* not req_mod(), we need irqsave here! */
238 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
239 __req_mod(req, what, &m);
240 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
241 put_ldev(mdev);
242
243 if (m.bio)
244 complete_master_bio(mdev, &m);
245 }
246
247 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
248 struct drbd_peer_request *peer_req, void *digest)
249 {
250 struct hash_desc desc;
251 struct scatterlist sg;
252 struct page *page = peer_req->pages;
253 struct page *tmp;
254 unsigned len;
255
256 desc.tfm = tfm;
257 desc.flags = 0;
258
259 sg_init_table(&sg, 1);
260 crypto_hash_init(&desc);
261
262 while ((tmp = page_chain_next(page))) {
263 /* all but the last page will be fully used */
264 sg_set_page(&sg, page, PAGE_SIZE, 0);
265 crypto_hash_update(&desc, &sg, sg.length);
266 page = tmp;
267 }
268 /* and now the last, possibly only partially used page */
269 len = peer_req->i.size & (PAGE_SIZE - 1);
270 sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
271 crypto_hash_update(&desc, &sg, sg.length);
272 crypto_hash_final(&desc, digest);
273 }
274
275 void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
276 {
277 struct hash_desc desc;
278 struct scatterlist sg;
279 struct bio_vec *bvec;
280 int i;
281
282 desc.tfm = tfm;
283 desc.flags = 0;
284
285 sg_init_table(&sg, 1);
286 crypto_hash_init(&desc);
287
288 bio_for_each_segment(bvec, bio, i) {
289 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
290 crypto_hash_update(&desc, &sg, sg.length);
291 }
292 crypto_hash_final(&desc, digest);
293 }
294
295 /* MAYBE merge common code with w_e_end_ov_req */
296 static int w_e_send_csum(struct drbd_work *w, int cancel)
297 {
298 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
299 struct drbd_conf *mdev = w->mdev;
300 int digest_size;
301 void *digest;
302 int err = 0;
303
304 if (unlikely(cancel))
305 goto out;
306
307 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
308 goto out;
309
310 digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
311 digest = kmalloc(digest_size, GFP_NOIO);
312 if (digest) {
313 sector_t sector = peer_req->i.sector;
314 unsigned int size = peer_req->i.size;
315 drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
316 /* Free peer_req and pages before send.
317 * In case we block on congestion, we could otherwise run into
318 * some distributed deadlock, if the other side blocks on
319 * congestion as well, because our receiver blocks in
320 * drbd_alloc_pages due to pp_in_use > max_buffers. */
321 drbd_free_peer_req(mdev, peer_req);
322 peer_req = NULL;
323 inc_rs_pending(mdev);
324 err = drbd_send_drequest_csum(mdev, sector, size,
325 digest, digest_size,
326 P_CSUM_RS_REQUEST);
327 kfree(digest);
328 } else {
329 dev_err(DEV, "kmalloc() of digest failed.\n");
330 err = -ENOMEM;
331 }
332
333 out:
334 if (peer_req)
335 drbd_free_peer_req(mdev, peer_req);
336
337 if (unlikely(err))
338 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
339 return err;
340 }
341
342 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
343
344 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
345 {
346 struct drbd_peer_request *peer_req;
347
348 if (!get_ldev(mdev))
349 return -EIO;
350
351 if (drbd_rs_should_slow_down(mdev, sector))
352 goto defer;
353
354 /* GFP_TRY, because if there is no memory available right now, this may
355 * be rescheduled for later. It is "only" background resync, after all. */
356 peer_req = drbd_alloc_peer_req(mdev, ID_SYNCER /* unused */, sector,
357 size, GFP_TRY);
358 if (!peer_req)
359 goto defer;
360
361 peer_req->w.cb = w_e_send_csum;
362 spin_lock_irq(&mdev->tconn->req_lock);
363 list_add(&peer_req->w.list, &mdev->read_ee);
364 spin_unlock_irq(&mdev->tconn->req_lock);
365
366 atomic_add(size >> 9, &mdev->rs_sect_ev);
367 if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
368 return 0;
369
370 /* If it failed because of ENOMEM, retry should help. If it failed
371 * because bio_add_page failed (probably broken lower level driver),
372 * retry may or may not help.
373 * If it does not, you may need to force disconnect. */
374 spin_lock_irq(&mdev->tconn->req_lock);
375 list_del(&peer_req->w.list);
376 spin_unlock_irq(&mdev->tconn->req_lock);
377
378 drbd_free_peer_req(mdev, peer_req);
379 defer:
380 put_ldev(mdev);
381 return -EAGAIN;
382 }
383
384 int w_resync_timer(struct drbd_work *w, int cancel)
385 {
386 struct drbd_conf *mdev = w->mdev;
387 switch (mdev->state.conn) {
388 case C_VERIFY_S:
389 w_make_ov_request(w, cancel);
390 break;
391 case C_SYNC_TARGET:
392 w_make_resync_request(w, cancel);
393 break;
394 }
395
396 return 0;
397 }
398
399 void resync_timer_fn(unsigned long data)
400 {
401 struct drbd_conf *mdev = (struct drbd_conf *) data;
402
403 if (list_empty(&mdev->resync_work.list))
404 drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
405 }
406
407 static void fifo_set(struct fifo_buffer *fb, int value)
408 {
409 int i;
410
411 for (i = 0; i < fb->size; i++)
412 fb->values[i] = value;
413 }
414
415 static int fifo_push(struct fifo_buffer *fb, int value)
416 {
417 int ov;
418
419 ov = fb->values[fb->head_index];
420 fb->values[fb->head_index++] = value;
421
422 if (fb->head_index >= fb->size)
423 fb->head_index = 0;
424
425 return ov;
426 }
427
428 static void fifo_add_val(struct fifo_buffer *fb, int value)
429 {
430 int i;
431
432 for (i = 0; i < fb->size; i++)
433 fb->values[i] += value;
434 }
435
436 struct fifo_buffer *fifo_alloc(int fifo_size)
437 {
438 struct fifo_buffer *fb;
439
440 fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size, GFP_KERNEL);
441 if (!fb)
442 return NULL;
443
444 fb->head_index = 0;
445 fb->size = fifo_size;
446 fb->total = 0;
447
448 return fb;
449 }
450
451 static int drbd_rs_controller(struct drbd_conf *mdev)
452 {
453 struct disk_conf *dc;
454 unsigned int sect_in; /* Number of sectors that came in since the last turn */
455 unsigned int want; /* The number of sectors we want in the proxy */
456 int req_sect; /* Number of sectors to request in this turn */
457 int correction; /* Number of sectors more we need in the proxy*/
458 int cps; /* correction per invocation of drbd_rs_controller() */
459 int steps; /* Number of time steps to plan ahead */
460 int curr_corr;
461 int max_sect;
462 struct fifo_buffer *plan;
463
464 sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
465 mdev->rs_in_flight -= sect_in;
466
467 dc = rcu_dereference(mdev->ldev->disk_conf);
468 plan = rcu_dereference(mdev->rs_plan_s);
469
470 steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
471
472 if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
473 want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
474 } else { /* normal path */
475 want = dc->c_fill_target ? dc->c_fill_target :
476 sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
477 }
478
479 correction = want - mdev->rs_in_flight - plan->total;
480
481 /* Plan ahead */
482 cps = correction / steps;
483 fifo_add_val(plan, cps);
484 plan->total += cps * steps;
485
486 /* What we do in this step */
487 curr_corr = fifo_push(plan, 0);
488 plan->total -= curr_corr;
489
490 req_sect = sect_in + curr_corr;
491 if (req_sect < 0)
492 req_sect = 0;
493
494 max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
495 if (req_sect > max_sect)
496 req_sect = max_sect;
497
498 /*
499 dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
500 sect_in, mdev->rs_in_flight, want, correction,
501 steps, cps, mdev->rs_planed, curr_corr, req_sect);
502 */
503
504 return req_sect;
505 }
506
507 static int drbd_rs_number_requests(struct drbd_conf *mdev)
508 {
509 int number;
510
511 rcu_read_lock();
512 if (rcu_dereference(mdev->rs_plan_s)->size) {
513 number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
514 mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
515 } else {
516 mdev->c_sync_rate = rcu_dereference(mdev->ldev->disk_conf)->resync_rate;
517 number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
518 }
519 rcu_read_unlock();
520
521 /* ignore the amount of pending requests, the resync controller should
522 * throttle down to incoming reply rate soon enough anyways. */
523 return number;
524 }
525
526 int w_make_resync_request(struct drbd_work *w, int cancel)
527 {
528 struct drbd_conf *mdev = w->mdev;
529 unsigned long bit;
530 sector_t sector;
531 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
532 int max_bio_size;
533 int number, rollback_i, size;
534 int align, queued, sndbuf;
535 int i = 0;
536
537 if (unlikely(cancel))
538 return 0;
539
540 if (mdev->rs_total == 0) {
541 /* empty resync? */
542 drbd_resync_finished(mdev);
543 return 0;
544 }
545
546 if (!get_ldev(mdev)) {
547 /* Since we only need to access mdev->rsync a
548 get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
549 to continue resync with a broken disk makes no sense at
550 all */
551 dev_err(DEV, "Disk broke down during resync!\n");
552 return 0;
553 }
554
555 max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
556 number = drbd_rs_number_requests(mdev);
557 if (number == 0)
558 goto requeue;
559
560 for (i = 0; i < number; i++) {
561 /* Stop generating RS requests, when half of the send buffer is filled */
562 mutex_lock(&mdev->tconn->data.mutex);
563 if (mdev->tconn->data.socket) {
564 queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
565 sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
566 } else {
567 queued = 1;
568 sndbuf = 0;
569 }
570 mutex_unlock(&mdev->tconn->data.mutex);
571 if (queued > sndbuf / 2)
572 goto requeue;
573
574 next_sector:
575 size = BM_BLOCK_SIZE;
576 bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
577
578 if (bit == DRBD_END_OF_BITMAP) {
579 mdev->bm_resync_fo = drbd_bm_bits(mdev);
580 put_ldev(mdev);
581 return 0;
582 }
583
584 sector = BM_BIT_TO_SECT(bit);
585
586 if (drbd_rs_should_slow_down(mdev, sector) ||
587 drbd_try_rs_begin_io(mdev, sector)) {
588 mdev->bm_resync_fo = bit;
589 goto requeue;
590 }
591 mdev->bm_resync_fo = bit + 1;
592
593 if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
594 drbd_rs_complete_io(mdev, sector);
595 goto next_sector;
596 }
597
598 #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
599 /* try to find some adjacent bits.
600 * we stop if we have already the maximum req size.
601 *
602 * Additionally always align bigger requests, in order to
603 * be prepared for all stripe sizes of software RAIDs.
604 */
605 align = 1;
606 rollback_i = i;
607 for (;;) {
608 if (size + BM_BLOCK_SIZE > max_bio_size)
609 break;
610
611 /* Be always aligned */
612 if (sector & ((1<<(align+3))-1))
613 break;
614
615 /* do not cross extent boundaries */
616 if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
617 break;
618 /* now, is it actually dirty, after all?
619 * caution, drbd_bm_test_bit is tri-state for some
620 * obscure reason; ( b == 0 ) would get the out-of-band
621 * only accidentally right because of the "oddly sized"
622 * adjustment below */
623 if (drbd_bm_test_bit(mdev, bit+1) != 1)
624 break;
625 bit++;
626 size += BM_BLOCK_SIZE;
627 if ((BM_BLOCK_SIZE << align) <= size)
628 align++;
629 i++;
630 }
631 /* if we merged some,
632 * reset the offset to start the next drbd_bm_find_next from */
633 if (size > BM_BLOCK_SIZE)
634 mdev->bm_resync_fo = bit + 1;
635 #endif
636
637 /* adjust very last sectors, in case we are oddly sized */
638 if (sector + (size>>9) > capacity)
639 size = (capacity-sector)<<9;
640 if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
641 switch (read_for_csum(mdev, sector, size)) {
642 case -EIO: /* Disk failure */
643 put_ldev(mdev);
644 return -EIO;
645 case -EAGAIN: /* allocation failed, or ldev busy */
646 drbd_rs_complete_io(mdev, sector);
647 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
648 i = rollback_i;
649 goto requeue;
650 case 0:
651 /* everything ok */
652 break;
653 default:
654 BUG();
655 }
656 } else {
657 int err;
658
659 inc_rs_pending(mdev);
660 err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
661 sector, size, ID_SYNCER);
662 if (err) {
663 dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
664 dec_rs_pending(mdev);
665 put_ldev(mdev);
666 return err;
667 }
668 }
669 }
670
671 if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
672 /* last syncer _request_ was sent,
673 * but the P_RS_DATA_REPLY not yet received. sync will end (and
674 * next sync group will resume), as soon as we receive the last
675 * resync data block, and the last bit is cleared.
676 * until then resync "work" is "inactive" ...
677 */
678 put_ldev(mdev);
679 return 0;
680 }
681
682 requeue:
683 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
684 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
685 put_ldev(mdev);
686 return 0;
687 }
688
689 static int w_make_ov_request(struct drbd_work *w, int cancel)
690 {
691 struct drbd_conf *mdev = w->mdev;
692 int number, i, size;
693 sector_t sector;
694 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
695
696 if (unlikely(cancel))
697 return 1;
698
699 number = drbd_rs_number_requests(mdev);
700
701 sector = mdev->ov_position;
702 for (i = 0; i < number; i++) {
703 if (sector >= capacity) {
704 return 1;
705 }
706
707 size = BM_BLOCK_SIZE;
708
709 if (drbd_rs_should_slow_down(mdev, sector) ||
710 drbd_try_rs_begin_io(mdev, sector)) {
711 mdev->ov_position = sector;
712 goto requeue;
713 }
714
715 if (sector + (size>>9) > capacity)
716 size = (capacity-sector)<<9;
717
718 inc_rs_pending(mdev);
719 if (drbd_send_ov_request(mdev, sector, size)) {
720 dec_rs_pending(mdev);
721 return 0;
722 }
723 sector += BM_SECT_PER_BIT;
724 }
725 mdev->ov_position = sector;
726
727 requeue:
728 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
729 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
730 return 1;
731 }
732
733 int w_ov_finished(struct drbd_work *w, int cancel)
734 {
735 struct drbd_conf *mdev = w->mdev;
736 kfree(w);
737 ov_out_of_sync_print(mdev);
738 drbd_resync_finished(mdev);
739
740 return 0;
741 }
742
743 static int w_resync_finished(struct drbd_work *w, int cancel)
744 {
745 struct drbd_conf *mdev = w->mdev;
746 kfree(w);
747
748 drbd_resync_finished(mdev);
749
750 return 0;
751 }
752
753 static void ping_peer(struct drbd_conf *mdev)
754 {
755 struct drbd_tconn *tconn = mdev->tconn;
756
757 clear_bit(GOT_PING_ACK, &tconn->flags);
758 request_ping(tconn);
759 wait_event(tconn->ping_wait,
760 test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
761 }
762
763 int drbd_resync_finished(struct drbd_conf *mdev)
764 {
765 unsigned long db, dt, dbdt;
766 unsigned long n_oos;
767 union drbd_state os, ns;
768 struct drbd_work *w;
769 char *khelper_cmd = NULL;
770 int verify_done = 0;
771
772 /* Remove all elements from the resync LRU. Since future actions
773 * might set bits in the (main) bitmap, then the entries in the
774 * resync LRU would be wrong. */
775 if (drbd_rs_del_all(mdev)) {
776 /* In case this is not possible now, most probably because
777 * there are P_RS_DATA_REPLY Packets lingering on the worker's
778 * queue (or even the read operations for those packets
779 * is not finished by now). Retry in 100ms. */
780
781 schedule_timeout_interruptible(HZ / 10);
782 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
783 if (w) {
784 w->cb = w_resync_finished;
785 w->mdev = mdev;
786 drbd_queue_work(&mdev->tconn->sender_work, w);
787 return 1;
788 }
789 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
790 }
791
792 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
793 if (dt <= 0)
794 dt = 1;
795 db = mdev->rs_total;
796 dbdt = Bit2KB(db/dt);
797 mdev->rs_paused /= HZ;
798
799 if (!get_ldev(mdev))
800 goto out;
801
802 ping_peer(mdev);
803
804 spin_lock_irq(&mdev->tconn->req_lock);
805 os = drbd_read_state(mdev);
806
807 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
808
809 /* This protects us against multiple calls (that can happen in the presence
810 of application IO), and against connectivity loss just before we arrive here. */
811 if (os.conn <= C_CONNECTED)
812 goto out_unlock;
813
814 ns = os;
815 ns.conn = C_CONNECTED;
816
817 dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
818 verify_done ? "Online verify " : "Resync",
819 dt + mdev->rs_paused, mdev->rs_paused, dbdt);
820
821 n_oos = drbd_bm_total_weight(mdev);
822
823 if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
824 if (n_oos) {
825 dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
826 n_oos, Bit2KB(1));
827 khelper_cmd = "out-of-sync";
828 }
829 } else {
830 D_ASSERT((n_oos - mdev->rs_failed) == 0);
831
832 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
833 khelper_cmd = "after-resync-target";
834
835 if (mdev->tconn->csums_tfm && mdev->rs_total) {
836 const unsigned long s = mdev->rs_same_csum;
837 const unsigned long t = mdev->rs_total;
838 const int ratio =
839 (t == 0) ? 0 :
840 (t < 100000) ? ((s*100)/t) : (s/(t/100));
841 dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
842 "transferred %luK total %luK\n",
843 ratio,
844 Bit2KB(mdev->rs_same_csum),
845 Bit2KB(mdev->rs_total - mdev->rs_same_csum),
846 Bit2KB(mdev->rs_total));
847 }
848 }
849
850 if (mdev->rs_failed) {
851 dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
852
853 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
854 ns.disk = D_INCONSISTENT;
855 ns.pdsk = D_UP_TO_DATE;
856 } else {
857 ns.disk = D_UP_TO_DATE;
858 ns.pdsk = D_INCONSISTENT;
859 }
860 } else {
861 ns.disk = D_UP_TO_DATE;
862 ns.pdsk = D_UP_TO_DATE;
863
864 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
865 if (mdev->p_uuid) {
866 int i;
867 for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
868 _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
869 drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
870 _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
871 } else {
872 dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
873 }
874 }
875
876 if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
877 /* for verify runs, we don't update uuids here,
878 * so there would be nothing to report. */
879 drbd_uuid_set_bm(mdev, 0UL);
880 drbd_print_uuids(mdev, "updated UUIDs");
881 if (mdev->p_uuid) {
882 /* Now the two UUID sets are equal, update what we
883 * know of the peer. */
884 int i;
885 for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
886 mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
887 }
888 }
889 }
890
891 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
892 out_unlock:
893 spin_unlock_irq(&mdev->tconn->req_lock);
894 put_ldev(mdev);
895 out:
896 mdev->rs_total = 0;
897 mdev->rs_failed = 0;
898 mdev->rs_paused = 0;
899 if (verify_done)
900 mdev->ov_start_sector = 0;
901
902 drbd_md_sync(mdev);
903
904 if (khelper_cmd)
905 drbd_khelper(mdev, khelper_cmd);
906
907 return 1;
908 }
909
910 /* helper */
911 static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
912 {
913 if (drbd_peer_req_has_active_page(peer_req)) {
914 /* This might happen if sendpage() has not finished */
915 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
916 atomic_add(i, &mdev->pp_in_use_by_net);
917 atomic_sub(i, &mdev->pp_in_use);
918 spin_lock_irq(&mdev->tconn->req_lock);
919 list_add_tail(&peer_req->w.list, &mdev->net_ee);
920 spin_unlock_irq(&mdev->tconn->req_lock);
921 wake_up(&drbd_pp_wait);
922 } else
923 drbd_free_peer_req(mdev, peer_req);
924 }
925
926 /**
927 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
928 * @mdev: DRBD device.
929 * @w: work object.
930 * @cancel: The connection will be closed anyways
931 */
932 int w_e_end_data_req(struct drbd_work *w, int cancel)
933 {
934 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
935 struct drbd_conf *mdev = w->mdev;
936 int err;
937
938 if (unlikely(cancel)) {
939 drbd_free_peer_req(mdev, peer_req);
940 dec_unacked(mdev);
941 return 0;
942 }
943
944 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
945 err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
946 } else {
947 if (__ratelimit(&drbd_ratelimit_state))
948 dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
949 (unsigned long long)peer_req->i.sector);
950
951 err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
952 }
953
954 dec_unacked(mdev);
955
956 move_to_net_ee_or_free(mdev, peer_req);
957
958 if (unlikely(err))
959 dev_err(DEV, "drbd_send_block() failed\n");
960 return err;
961 }
962
963 /**
964 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
965 * @mdev: DRBD device.
966 * @w: work object.
967 * @cancel: The connection will be closed anyways
968 */
969 int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
970 {
971 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
972 struct drbd_conf *mdev = w->mdev;
973 int err;
974
975 if (unlikely(cancel)) {
976 drbd_free_peer_req(mdev, peer_req);
977 dec_unacked(mdev);
978 return 0;
979 }
980
981 if (get_ldev_if_state(mdev, D_FAILED)) {
982 drbd_rs_complete_io(mdev, peer_req->i.sector);
983 put_ldev(mdev);
984 }
985
986 if (mdev->state.conn == C_AHEAD) {
987 err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
988 } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
989 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
990 inc_rs_pending(mdev);
991 err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
992 } else {
993 if (__ratelimit(&drbd_ratelimit_state))
994 dev_err(DEV, "Not sending RSDataReply, "
995 "partner DISKLESS!\n");
996 err = 0;
997 }
998 } else {
999 if (__ratelimit(&drbd_ratelimit_state))
1000 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
1001 (unsigned long long)peer_req->i.sector);
1002
1003 err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
1004
1005 /* update resync data with failure */
1006 drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
1007 }
1008
1009 dec_unacked(mdev);
1010
1011 move_to_net_ee_or_free(mdev, peer_req);
1012
1013 if (unlikely(err))
1014 dev_err(DEV, "drbd_send_block() failed\n");
1015 return err;
1016 }
1017
1018 int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1019 {
1020 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1021 struct drbd_conf *mdev = w->mdev;
1022 struct digest_info *di;
1023 int digest_size;
1024 void *digest = NULL;
1025 int err, eq = 0;
1026
1027 if (unlikely(cancel)) {
1028 drbd_free_peer_req(mdev, peer_req);
1029 dec_unacked(mdev);
1030 return 0;
1031 }
1032
1033 if (get_ldev(mdev)) {
1034 drbd_rs_complete_io(mdev, peer_req->i.sector);
1035 put_ldev(mdev);
1036 }
1037
1038 di = peer_req->digest;
1039
1040 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1041 /* quick hack to try to avoid a race against reconfiguration.
1042 * a real fix would be much more involved,
1043 * introducing more locking mechanisms */
1044 if (mdev->tconn->csums_tfm) {
1045 digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
1046 D_ASSERT(digest_size == di->digest_size);
1047 digest = kmalloc(digest_size, GFP_NOIO);
1048 }
1049 if (digest) {
1050 drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
1051 eq = !memcmp(digest, di->digest, digest_size);
1052 kfree(digest);
1053 }
1054
1055 if (eq) {
1056 drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
1057 /* rs_same_csums unit is BM_BLOCK_SIZE */
1058 mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
1059 err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
1060 } else {
1061 inc_rs_pending(mdev);
1062 peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1063 peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
1064 kfree(di);
1065 err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
1066 }
1067 } else {
1068 err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
1069 if (__ratelimit(&drbd_ratelimit_state))
1070 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1071 }
1072
1073 dec_unacked(mdev);
1074 move_to_net_ee_or_free(mdev, peer_req);
1075
1076 if (unlikely(err))
1077 dev_err(DEV, "drbd_send_block/ack() failed\n");
1078 return err;
1079 }
1080
1081 int w_e_end_ov_req(struct drbd_work *w, int cancel)
1082 {
1083 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1084 struct drbd_conf *mdev = w->mdev;
1085 sector_t sector = peer_req->i.sector;
1086 unsigned int size = peer_req->i.size;
1087 int digest_size;
1088 void *digest;
1089 int err = 0;
1090
1091 if (unlikely(cancel))
1092 goto out;
1093
1094 digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
1095 digest = kmalloc(digest_size, GFP_NOIO);
1096 if (!digest) {
1097 err = 1; /* terminate the connection in case the allocation failed */
1098 goto out;
1099 }
1100
1101 if (likely(!(peer_req->flags & EE_WAS_ERROR)))
1102 drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
1103 else
1104 memset(digest, 0, digest_size);
1105
1106 /* Free e and pages before send.
1107 * In case we block on congestion, we could otherwise run into
1108 * some distributed deadlock, if the other side blocks on
1109 * congestion as well, because our receiver blocks in
1110 * drbd_alloc_pages due to pp_in_use > max_buffers. */
1111 drbd_free_peer_req(mdev, peer_req);
1112 peer_req = NULL;
1113 inc_rs_pending(mdev);
1114 err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
1115 if (err)
1116 dec_rs_pending(mdev);
1117 kfree(digest);
1118
1119 out:
1120 if (peer_req)
1121 drbd_free_peer_req(mdev, peer_req);
1122 dec_unacked(mdev);
1123 return err;
1124 }
1125
1126 void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
1127 {
1128 if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
1129 mdev->ov_last_oos_size += size>>9;
1130 } else {
1131 mdev->ov_last_oos_start = sector;
1132 mdev->ov_last_oos_size = size>>9;
1133 }
1134 drbd_set_out_of_sync(mdev, sector, size);
1135 }
1136
1137 int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1138 {
1139 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1140 struct drbd_conf *mdev = w->mdev;
1141 struct digest_info *di;
1142 void *digest;
1143 sector_t sector = peer_req->i.sector;
1144 unsigned int size = peer_req->i.size;
1145 int digest_size;
1146 int err, eq = 0;
1147
1148 if (unlikely(cancel)) {
1149 drbd_free_peer_req(mdev, peer_req);
1150 dec_unacked(mdev);
1151 return 0;
1152 }
1153
1154 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1155 * the resync lru has been cleaned up already */
1156 if (get_ldev(mdev)) {
1157 drbd_rs_complete_io(mdev, peer_req->i.sector);
1158 put_ldev(mdev);
1159 }
1160
1161 di = peer_req->digest;
1162
1163 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1164 digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
1165 digest = kmalloc(digest_size, GFP_NOIO);
1166 if (digest) {
1167 drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
1168
1169 D_ASSERT(digest_size == di->digest_size);
1170 eq = !memcmp(digest, di->digest, digest_size);
1171 kfree(digest);
1172 }
1173 }
1174
1175 /* Free peer_req and pages before send.
1176 * In case we block on congestion, we could otherwise run into
1177 * some distributed deadlock, if the other side blocks on
1178 * congestion as well, because our receiver blocks in
1179 * drbd_alloc_pages due to pp_in_use > max_buffers. */
1180 drbd_free_peer_req(mdev, peer_req);
1181 if (!eq)
1182 drbd_ov_out_of_sync_found(mdev, sector, size);
1183 else
1184 ov_out_of_sync_print(mdev);
1185
1186 err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
1187 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1188
1189 dec_unacked(mdev);
1190
1191 --mdev->ov_left;
1192
1193 /* let's advance progress step marks only for every other megabyte */
1194 if ((mdev->ov_left & 0x200) == 0x200)
1195 drbd_advance_rs_marks(mdev, mdev->ov_left);
1196
1197 if (mdev->ov_left == 0) {
1198 ov_out_of_sync_print(mdev);
1199 drbd_resync_finished(mdev);
1200 }
1201
1202 return err;
1203 }
1204
1205 int w_prev_work_done(struct drbd_work *w, int cancel)
1206 {
1207 struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
1208
1209 complete(&b->done);
1210 return 0;
1211 }
1212
1213 /* FIXME
1214 * We need to track the number of pending barrier acks,
1215 * and to be able to wait for them.
1216 * See also comment in drbd_adm_attach before drbd_suspend_io.
1217 */
1218 int drbd_send_barrier(struct drbd_tconn *tconn)
1219 {
1220 struct p_barrier *p;
1221 struct drbd_socket *sock;
1222
1223 sock = &tconn->data;
1224 p = conn_prepare_command(tconn, sock);
1225 if (!p)
1226 return -EIO;
1227 p->barrier = tconn->send.current_epoch_nr;
1228 p->pad = 0;
1229 tconn->send.current_epoch_writes = 0;
1230
1231 return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0);
1232 }
1233
1234 int w_send_write_hint(struct drbd_work *w, int cancel)
1235 {
1236 struct drbd_conf *mdev = w->mdev;
1237 struct drbd_socket *sock;
1238
1239 if (cancel)
1240 return 0;
1241 sock = &mdev->tconn->data;
1242 if (!drbd_prepare_command(mdev, sock))
1243 return -EIO;
1244 return drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
1245 }
1246
1247 int w_send_out_of_sync(struct drbd_work *w, int cancel)
1248 {
1249 struct drbd_request *req = container_of(w, struct drbd_request, w);
1250 struct drbd_conf *mdev = w->mdev;
1251 struct drbd_tconn *tconn = mdev->tconn;
1252 int err;
1253
1254 if (unlikely(cancel)) {
1255 req_mod(req, SEND_CANCELED);
1256 return 0;
1257 }
1258
1259 if (!tconn->send.seen_any_write_yet) {
1260 tconn->send.seen_any_write_yet = true;
1261 tconn->send.current_epoch_nr = req->epoch;
1262 }
1263 if (tconn->send.current_epoch_nr != req->epoch) {
1264 if (tconn->send.current_epoch_writes)
1265 drbd_send_barrier(tconn);
1266 tconn->send.current_epoch_nr = req->epoch;
1267 }
1268 /* this time, no tconn->send.current_epoch_writes++;
1269 * If it was sent, it was the closing barrier for the last
1270 * replicated epoch, before we went into AHEAD mode.
1271 * No more barriers will be sent, until we leave AHEAD mode again. */
1272
1273 err = drbd_send_out_of_sync(mdev, req);
1274 req_mod(req, OOS_HANDED_TO_NETWORK);
1275
1276 return err;
1277 }
1278
1279 /**
1280 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1281 * @mdev: DRBD device.
1282 * @w: work object.
1283 * @cancel: The connection will be closed anyways
1284 */
1285 int w_send_dblock(struct drbd_work *w, int cancel)
1286 {
1287 struct drbd_request *req = container_of(w, struct drbd_request, w);
1288 struct drbd_conf *mdev = w->mdev;
1289 struct drbd_tconn *tconn = mdev->tconn;
1290 int err;
1291
1292 if (unlikely(cancel)) {
1293 req_mod(req, SEND_CANCELED);
1294 return 0;
1295 }
1296
1297 if (!tconn->send.seen_any_write_yet) {
1298 tconn->send.seen_any_write_yet = true;
1299 tconn->send.current_epoch_nr = req->epoch;
1300 }
1301 if (tconn->send.current_epoch_nr != req->epoch) {
1302 if (tconn->send.current_epoch_writes)
1303 drbd_send_barrier(tconn);
1304 tconn->send.current_epoch_nr = req->epoch;
1305 }
1306 tconn->send.current_epoch_writes++;
1307
1308 err = drbd_send_dblock(mdev, req);
1309 req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
1310
1311 return err;
1312 }
1313
1314 /**
1315 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1316 * @mdev: DRBD device.
1317 * @w: work object.
1318 * @cancel: The connection will be closed anyways
1319 */
1320 int w_send_read_req(struct drbd_work *w, int cancel)
1321 {
1322 struct drbd_request *req = container_of(w, struct drbd_request, w);
1323 struct drbd_conf *mdev = w->mdev;
1324 struct drbd_tconn *tconn = mdev->tconn;
1325 int err;
1326
1327 if (unlikely(cancel)) {
1328 req_mod(req, SEND_CANCELED);
1329 return 0;
1330 }
1331
1332 /* Even read requests may close a write epoch,
1333 * if there was any yet. */
1334 if (tconn->send.seen_any_write_yet &&
1335 tconn->send.current_epoch_nr != req->epoch) {
1336 if (tconn->send.current_epoch_writes)
1337 drbd_send_barrier(tconn);
1338 tconn->send.current_epoch_nr = req->epoch;
1339 }
1340
1341 err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
1342 (unsigned long)req);
1343
1344 req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
1345
1346 return err;
1347 }
1348
1349 int w_restart_disk_io(struct drbd_work *w, int cancel)
1350 {
1351 struct drbd_request *req = container_of(w, struct drbd_request, w);
1352 struct drbd_conf *mdev = w->mdev;
1353
1354 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1355 drbd_al_begin_io(mdev, &req->i);
1356
1357 drbd_req_make_private_bio(req, req->master_bio);
1358 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1359 generic_make_request(req->private_bio);
1360
1361 return 0;
1362 }
1363
1364 static int _drbd_may_sync_now(struct drbd_conf *mdev)
1365 {
1366 struct drbd_conf *odev = mdev;
1367 int resync_after;
1368
1369 while (1) {
1370 if (!odev->ldev)
1371 return 1;
1372 rcu_read_lock();
1373 resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1374 rcu_read_unlock();
1375 if (resync_after == -1)
1376 return 1;
1377 odev = minor_to_mdev(resync_after);
1378 if (!expect(odev))
1379 return 1;
1380 if ((odev->state.conn >= C_SYNC_SOURCE &&
1381 odev->state.conn <= C_PAUSED_SYNC_T) ||
1382 odev->state.aftr_isp || odev->state.peer_isp ||
1383 odev->state.user_isp)
1384 return 0;
1385 }
1386 }
1387
1388 /**
1389 * _drbd_pause_after() - Pause resync on all devices that may not resync now
1390 * @mdev: DRBD device.
1391 *
1392 * Called from process context only (admin command and after_state_ch).
1393 */
1394 static int _drbd_pause_after(struct drbd_conf *mdev)
1395 {
1396 struct drbd_conf *odev;
1397 int i, rv = 0;
1398
1399 rcu_read_lock();
1400 idr_for_each_entry(&minors, odev, i) {
1401 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1402 continue;
1403 if (!_drbd_may_sync_now(odev))
1404 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
1405 != SS_NOTHING_TO_DO);
1406 }
1407 rcu_read_unlock();
1408
1409 return rv;
1410 }
1411
1412 /**
1413 * _drbd_resume_next() - Resume resync on all devices that may resync now
1414 * @mdev: DRBD device.
1415 *
1416 * Called from process context only (admin command and worker).
1417 */
1418 static int _drbd_resume_next(struct drbd_conf *mdev)
1419 {
1420 struct drbd_conf *odev;
1421 int i, rv = 0;
1422
1423 rcu_read_lock();
1424 idr_for_each_entry(&minors, odev, i) {
1425 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1426 continue;
1427 if (odev->state.aftr_isp) {
1428 if (_drbd_may_sync_now(odev))
1429 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
1430 CS_HARD, NULL)
1431 != SS_NOTHING_TO_DO) ;
1432 }
1433 }
1434 rcu_read_unlock();
1435 return rv;
1436 }
1437
1438 void resume_next_sg(struct drbd_conf *mdev)
1439 {
1440 write_lock_irq(&global_state_lock);
1441 _drbd_resume_next(mdev);
1442 write_unlock_irq(&global_state_lock);
1443 }
1444
1445 void suspend_other_sg(struct drbd_conf *mdev)
1446 {
1447 write_lock_irq(&global_state_lock);
1448 _drbd_pause_after(mdev);
1449 write_unlock_irq(&global_state_lock);
1450 }
1451
1452 /* caller must hold global_state_lock */
1453 enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
1454 {
1455 struct drbd_conf *odev;
1456 int resync_after;
1457
1458 if (o_minor == -1)
1459 return NO_ERROR;
1460 if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
1461 return ERR_RESYNC_AFTER;
1462
1463 /* check for loops */
1464 odev = minor_to_mdev(o_minor);
1465 while (1) {
1466 if (odev == mdev)
1467 return ERR_RESYNC_AFTER_CYCLE;
1468
1469 rcu_read_lock();
1470 resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1471 rcu_read_unlock();
1472 /* dependency chain ends here, no cycles. */
1473 if (resync_after == -1)
1474 return NO_ERROR;
1475
1476 /* follow the dependency chain */
1477 odev = minor_to_mdev(resync_after);
1478 }
1479 }
1480
1481 /* caller must hold global_state_lock */
1482 void drbd_resync_after_changed(struct drbd_conf *mdev)
1483 {
1484 int changes;
1485
1486 do {
1487 changes = _drbd_pause_after(mdev);
1488 changes |= _drbd_resume_next(mdev);
1489 } while (changes);
1490 }
1491
1492 void drbd_rs_controller_reset(struct drbd_conf *mdev)
1493 {
1494 struct fifo_buffer *plan;
1495
1496 atomic_set(&mdev->rs_sect_in, 0);
1497 atomic_set(&mdev->rs_sect_ev, 0);
1498 mdev->rs_in_flight = 0;
1499
1500 /* Updating the RCU protected object in place is necessary since
1501 this function gets called from atomic context.
1502 It is valid since all other updates also lead to an completely
1503 empty fifo */
1504 rcu_read_lock();
1505 plan = rcu_dereference(mdev->rs_plan_s);
1506 plan->total = 0;
1507 fifo_set(plan, 0);
1508 rcu_read_unlock();
1509 }
1510
1511 void start_resync_timer_fn(unsigned long data)
1512 {
1513 struct drbd_conf *mdev = (struct drbd_conf *) data;
1514
1515 drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
1516 }
1517
1518 int w_start_resync(struct drbd_work *w, int cancel)
1519 {
1520 struct drbd_conf *mdev = w->mdev;
1521
1522 if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
1523 dev_warn(DEV, "w_start_resync later...\n");
1524 mdev->start_resync_timer.expires = jiffies + HZ/10;
1525 add_timer(&mdev->start_resync_timer);
1526 return 0;
1527 }
1528
1529 drbd_start_resync(mdev, C_SYNC_SOURCE);
1530 clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
1531 return 0;
1532 }
1533
1534 /**
1535 * drbd_start_resync() - Start the resync process
1536 * @mdev: DRBD device.
1537 * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
1538 *
1539 * This function might bring you directly into one of the
1540 * C_PAUSED_SYNC_* states.
1541 */
1542 void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1543 {
1544 union drbd_state ns;
1545 int r;
1546
1547 if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
1548 dev_err(DEV, "Resync already running!\n");
1549 return;
1550 }
1551
1552 if (mdev->state.conn < C_AHEAD) {
1553 /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1554 drbd_rs_cancel_all(mdev);
1555 /* This should be done when we abort the resync. We definitely do not
1556 want to have this for connections going back and forth between
1557 Ahead/Behind and SyncSource/SyncTarget */
1558 }
1559
1560 if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
1561 if (side == C_SYNC_TARGET) {
1562 /* Since application IO was locked out during C_WF_BITMAP_T and
1563 C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1564 we check that we might make the data inconsistent. */
1565 r = drbd_khelper(mdev, "before-resync-target");
1566 r = (r >> 8) & 0xff;
1567 if (r > 0) {
1568 dev_info(DEV, "before-resync-target handler returned %d, "
1569 "dropping connection.\n", r);
1570 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1571 return;
1572 }
1573 } else /* C_SYNC_SOURCE */ {
1574 r = drbd_khelper(mdev, "before-resync-source");
1575 r = (r >> 8) & 0xff;
1576 if (r > 0) {
1577 if (r == 3) {
1578 dev_info(DEV, "before-resync-source handler returned %d, "
1579 "ignoring. Old userland tools?", r);
1580 } else {
1581 dev_info(DEV, "before-resync-source handler returned %d, "
1582 "dropping connection.\n", r);
1583 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1584 return;
1585 }
1586 }
1587 }
1588 }
1589
1590 if (current == mdev->tconn->worker.task) {
1591 /* The worker should not sleep waiting for state_mutex,
1592 that can take long */
1593 if (!mutex_trylock(mdev->state_mutex)) {
1594 set_bit(B_RS_H_DONE, &mdev->flags);
1595 mdev->start_resync_timer.expires = jiffies + HZ/5;
1596 add_timer(&mdev->start_resync_timer);
1597 return;
1598 }
1599 } else {
1600 mutex_lock(mdev->state_mutex);
1601 }
1602 clear_bit(B_RS_H_DONE, &mdev->flags);
1603
1604 write_lock_irq(&global_state_lock);
1605 if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
1606 write_unlock_irq(&global_state_lock);
1607 mutex_unlock(mdev->state_mutex);
1608 return;
1609 }
1610
1611 ns = drbd_read_state(mdev);
1612
1613 ns.aftr_isp = !_drbd_may_sync_now(mdev);
1614
1615 ns.conn = side;
1616
1617 if (side == C_SYNC_TARGET)
1618 ns.disk = D_INCONSISTENT;
1619 else /* side == C_SYNC_SOURCE */
1620 ns.pdsk = D_INCONSISTENT;
1621
1622 r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1623 ns = drbd_read_state(mdev);
1624
1625 if (ns.conn < C_CONNECTED)
1626 r = SS_UNKNOWN_ERROR;
1627
1628 if (r == SS_SUCCESS) {
1629 unsigned long tw = drbd_bm_total_weight(mdev);
1630 unsigned long now = jiffies;
1631 int i;
1632
1633 mdev->rs_failed = 0;
1634 mdev->rs_paused = 0;
1635 mdev->rs_same_csum = 0;
1636 mdev->rs_last_events = 0;
1637 mdev->rs_last_sect_ev = 0;
1638 mdev->rs_total = tw;
1639 mdev->rs_start = now;
1640 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1641 mdev->rs_mark_left[i] = tw;
1642 mdev->rs_mark_time[i] = now;
1643 }
1644 _drbd_pause_after(mdev);
1645 }
1646 write_unlock_irq(&global_state_lock);
1647
1648 if (r == SS_SUCCESS) {
1649 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1650 drbd_conn_str(ns.conn),
1651 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1652 (unsigned long) mdev->rs_total);
1653 if (side == C_SYNC_TARGET)
1654 mdev->bm_resync_fo = 0;
1655
1656 /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
1657 * with w_send_oos, or the sync target will get confused as to
1658 * how much bits to resync. We cannot do that always, because for an
1659 * empty resync and protocol < 95, we need to do it here, as we call
1660 * drbd_resync_finished from here in that case.
1661 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
1662 * and from after_state_ch otherwise. */
1663 if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
1664 drbd_gen_and_send_sync_uuid(mdev);
1665
1666 if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
1667 /* This still has a race (about when exactly the peers
1668 * detect connection loss) that can lead to a full sync
1669 * on next handshake. In 8.3.9 we fixed this with explicit
1670 * resync-finished notifications, but the fix
1671 * introduces a protocol change. Sleeping for some
1672 * time longer than the ping interval + timeout on the
1673 * SyncSource, to give the SyncTarget the chance to
1674 * detect connection loss, then waiting for a ping
1675 * response (implicit in drbd_resync_finished) reduces
1676 * the race considerably, but does not solve it. */
1677 if (side == C_SYNC_SOURCE) {
1678 struct net_conf *nc;
1679 int timeo;
1680
1681 rcu_read_lock();
1682 nc = rcu_dereference(mdev->tconn->net_conf);
1683 timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
1684 rcu_read_unlock();
1685 schedule_timeout_interruptible(timeo);
1686 }
1687 drbd_resync_finished(mdev);
1688 }
1689
1690 drbd_rs_controller_reset(mdev);
1691 /* ns.conn may already be != mdev->state.conn,
1692 * we may have been paused in between, or become paused until
1693 * the timer triggers.
1694 * No matter, that is handled in resync_timer_fn() */
1695 if (ns.conn == C_SYNC_TARGET)
1696 mod_timer(&mdev->resync_timer, jiffies);
1697
1698 drbd_md_sync(mdev);
1699 }
1700 put_ldev(mdev);
1701 mutex_unlock(mdev->state_mutex);
1702 }
1703
1704 /* If the resource already closed the current epoch, but we did not
1705 * (because we have not yet seen new requests), we should send the
1706 * corresponding barrier now. Must be checked within the same spinlock
1707 * that is used to check for new requests. */
1708 bool need_to_send_barrier(struct drbd_tconn *connection)
1709 {
1710 if (!connection->send.seen_any_write_yet)
1711 return false;
1712
1713 /* Skip barriers that do not contain any writes.
1714 * This may happen during AHEAD mode. */
1715 if (!connection->send.current_epoch_writes)
1716 return false;
1717
1718 /* ->req_lock is held when requests are queued on
1719 * connection->sender_work, and put into ->transfer_log.
1720 * It is also held when ->current_tle_nr is increased.
1721 * So either there are already new requests queued,
1722 * and corresponding barriers will be send there.
1723 * Or nothing new is queued yet, so the difference will be 1.
1724 */
1725 if (atomic_read(&connection->current_tle_nr) !=
1726 connection->send.current_epoch_nr + 1)
1727 return false;
1728
1729 return true;
1730 }
1731
1732 bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
1733 {
1734 spin_lock_irq(&queue->q_lock);
1735 list_splice_init(&queue->q, work_list);
1736 spin_unlock_irq(&queue->q_lock);
1737 return !list_empty(work_list);
1738 }
1739
1740 bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
1741 {
1742 spin_lock_irq(&queue->q_lock);
1743 if (!list_empty(&queue->q))
1744 list_move(queue->q.next, work_list);
1745 spin_unlock_irq(&queue->q_lock);
1746 return !list_empty(work_list);
1747 }
1748
1749 void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list)
1750 {
1751 DEFINE_WAIT(wait);
1752 struct net_conf *nc;
1753 int uncork, cork;
1754
1755 dequeue_work_item(&connection->sender_work, work_list);
1756 if (!list_empty(work_list))
1757 return;
1758
1759 /* Still nothing to do?
1760 * Maybe we still need to close the current epoch,
1761 * even if no new requests are queued yet.
1762 *
1763 * Also, poke TCP, just in case.
1764 * Then wait for new work (or signal). */
1765 rcu_read_lock();
1766 nc = rcu_dereference(connection->net_conf);
1767 uncork = nc ? nc->tcp_cork : 0;
1768 rcu_read_unlock();
1769 if (uncork) {
1770 mutex_lock(&connection->data.mutex);
1771 if (connection->data.socket)
1772 drbd_tcp_uncork(connection->data.socket);
1773 mutex_unlock(&connection->data.mutex);
1774 }
1775
1776 for (;;) {
1777 int send_barrier;
1778 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
1779 spin_lock_irq(&connection->req_lock);
1780 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
1781 list_splice_init(&connection->sender_work.q, work_list);
1782 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */
1783 if (!list_empty(work_list) || signal_pending(current)) {
1784 spin_unlock_irq(&connection->req_lock);
1785 break;
1786 }
1787 send_barrier = need_to_send_barrier(connection);
1788 spin_unlock_irq(&connection->req_lock);
1789 if (send_barrier) {
1790 drbd_send_barrier(connection);
1791 connection->send.current_epoch_nr++;
1792 }
1793 schedule();
1794 /* may be woken up for other things but new work, too,
1795 * e.g. if the current epoch got closed.
1796 * In which case we send the barrier above. */
1797 }
1798 finish_wait(&connection->sender_work.q_wait, &wait);
1799
1800 /* someone may have changed the config while we have been waiting above. */
1801 rcu_read_lock();
1802 nc = rcu_dereference(connection->net_conf);
1803 cork = nc ? nc->tcp_cork : 0;
1804 rcu_read_unlock();
1805 mutex_lock(&connection->data.mutex);
1806 if (connection->data.socket) {
1807 if (cork)
1808 drbd_tcp_cork(connection->data.socket);
1809 else if (!uncork)
1810 drbd_tcp_uncork(connection->data.socket);
1811 }
1812 mutex_unlock(&connection->data.mutex);
1813 }
1814
1815 int drbd_worker(struct drbd_thread *thi)
1816 {
1817 struct drbd_tconn *tconn = thi->tconn;
1818 struct drbd_work *w = NULL;
1819 struct drbd_conf *mdev;
1820 LIST_HEAD(work_list);
1821 int vnr;
1822
1823 while (get_t_state(thi) == RUNNING) {
1824 drbd_thread_current_set_cpu(thi);
1825
1826 /* as long as we use drbd_queue_work_front(),
1827 * we may only dequeue single work items here, not batches. */
1828 if (list_empty(&work_list))
1829 wait_for_work(tconn, &work_list);
1830
1831 if (signal_pending(current)) {
1832 flush_signals(current);
1833 if (get_t_state(thi) == RUNNING) {
1834 conn_warn(tconn, "Worker got an unexpected signal\n");
1835 continue;
1836 }
1837 break;
1838 }
1839
1840 if (get_t_state(thi) != RUNNING)
1841 break;
1842
1843 while (!list_empty(&work_list)) {
1844 w = list_first_entry(&work_list, struct drbd_work, list);
1845 list_del_init(&w->list);
1846 if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0)
1847 continue;
1848 if (tconn->cstate >= C_WF_REPORT_PARAMS)
1849 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
1850 }
1851 }
1852
1853 do {
1854 while (!list_empty(&work_list)) {
1855 w = list_first_entry(&work_list, struct drbd_work, list);
1856 list_del_init(&w->list);
1857 w->cb(w, 1);
1858 }
1859 dequeue_work_batch(&tconn->sender_work, &work_list);
1860 } while (!list_empty(&work_list));
1861
1862 rcu_read_lock();
1863 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1864 D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
1865 kref_get(&mdev->kref);
1866 rcu_read_unlock();
1867 drbd_mdev_cleanup(mdev);
1868 kref_put(&mdev->kref, &drbd_minor_destroy);
1869 rcu_read_lock();
1870 }
1871 rcu_read_unlock();
1872
1873 return 0;
1874 }
This page took 0.273736 seconds and 6 git commands to generate.