/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel);
/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_endio_pri (defined here)
 *   drbd_endio_sec (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 */
/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync after dependencies, we grab a write lock, because
   we need stable states on all devices for that. */
rwlock_t global_state_lock;
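
/* Usage sketch (illustrative, not a verbatim call site): the write side is
 * visible further down in this file -- resume_next_sg(), suspend_other_sg(),
 * drbd_alter_sa() and drbd_start_resync() all take
 * write_lock_irq(&global_state_lock) before (re-)evaluating the sync-after
 * dependencies, while the per-device state transition code takes the
 * corresponding read lock, as the comment above describes. */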
/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;
	struct drbd_conf *mdev;

	md_io = (struct drbd_md_io *)bio->bi_private;
	mdev = container_of(md_io, struct drbd_conf, md_io);

	md_io->error = error;

	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
	 * to timeout on the lower level device, and eventually detach from it.
	 * If this io completion runs after that timeout expired, this
	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
	 * During normal operation, this only puts that extra reference
	 * down to 1 again.
	 * Make sure we first drop the reference, and only then signal
	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
	 * next drbd_md_sync_page_io(), that we trigger the
	 * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
	 */
	drbd_md_put_buffer(mdev);
	md_io->done = 1;
	wake_up(&mdev->misc_wait);
	bio_put(bio);
	put_ldev(mdev);
}
/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = e->mdev;

	D_ASSERT(e->block_id != ID_VACANT);

	spin_lock_irqsave(&mdev->req_lock, flags);
	mdev->read_cnt += e->size >> 9;
	list_del(&e->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &e->flags))
		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	drbd_queue_work(&mdev->data.work, &e->w);
	put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage. */
static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = e->mdev;
	sector_t e_sector;
	int do_wake;
	int is_syncer_req;
	int do_al_complete_io;

	D_ASSERT(e->block_id != ID_VACANT);

	/* after we moved e to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	e_sector = e->sector;
	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
	is_syncer_req = is_syncer_block_id(e->block_id);

	spin_lock_irqsave(&mdev->req_lock, flags);
	mdev->writ_cnt += e->size >> 9;
	list_del(&e->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&e->w.list, &mdev->done_ee);

	/* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
	 * neither did we wake possibly waiting conflicting requests.
	 * done from "drbd_process_done_ee" within the appropriate w.cb
	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */

	do_wake = is_syncer_req
		? list_empty(&mdev->sync_ee)
		: list_empty(&mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &e->flags))
		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (is_syncer_req)
		drbd_rs_complete_io(mdev, e_sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, e_sector);

	wake_asender(mdev);
	put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_epoch_entry *e = bio->bi_private;
	struct drbd_conf *mdev = e->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)e->sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)e->sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &e->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&e->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(e);
		else
			drbd_endio_read_sec_final(e);
	}
}
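
/* Note (informational): e->pending_bios is initialized by drbd_submit_ee()
 * to the number of bios needed to cover the epoch entry, so the
 * atomic_dec_and_test() above runs the final-stage handler exactly once,
 * on whichever of those bios happens to complete last. */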
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? write_completed_with_error
			: (bio_rw(bio) == READ)
			  ? read_completed_with_error
			  : read_ahead_completed_with_error;
	} else
		what = completed_ok;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}
int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	/* We should not detach for read io-error,
	 * but try to WRITE the P_DATA_REPLY to the failed location,
	 * to give the disk the chance to relocate that block */

	spin_lock_irq(&mdev->req_lock);
	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
		_req_mod(req, read_retry_remote_canceled);
		spin_unlock_irq(&mdev->req_lock);
		return 1;
	}
	spin_unlock_irq(&mdev->req_lock);

	return w_send_read_req(mdev, w, 0);
}
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = e->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = e->size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}
void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	bio_for_each_segment(bvec, bio, i) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}
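
/* Typical use of the two checksum helpers in this file (sketch): allocate
 * crypto_hash_digestsize(tfm) bytes with GFP_NOIO, run drbd_csum_ee() or
 * drbd_csum_bio(), then memcmp() the result against the peer's digest --
 * see w_e_send_csum(), w_e_end_csum_rs_req() and w_e_end_ov_reply() below. */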
/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int digest_size;
	void *digest;
	int ok = 1;

	D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);

	if (unlikely(cancel))
		goto out;

	if (unlikely((e->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = e->sector;
		unsigned int size = e->size;
		drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
		/* Free e and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
		drbd_free_ee(mdev, e);
		e = NULL;
		inc_rs_pending(mdev);
		ok = drbd_send_drequest_csum(mdev, sector, size,
					     digest, digest_size,
					     P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		ok = 0;
	}

out:
	if (e)
		drbd_free_ee(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return ok;
}
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_epoch_entry *e;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
	if (!e)
		goto defer;

	e->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}
int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(mdev, w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(mdev, w, cancel);
		break;
	}

	return 1;
}
void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	if (list_empty(&mdev->resync_work.list))
		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}
static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}
static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}
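
/* Worked example for the fifo helpers (illustrative numbers): with
 * fb->size == 4, values {5, 0, 0, 0} and head_index == 0,
 * fifo_push(fb, 2) returns the oldest value 5, stores 2 in its place and
 * advances head_index to 1, wrapping at fb->size.  fifo_add_val() biases
 * every slot by the same amount; drbd_rs_controller() below uses that to
 * spread one correction evenly over all plan-ahead steps at once. */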
static int drbd_rs_controller(struct drbd_conf *mdev)
{
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect;          /* Number of sectors to request in this turn */
	int correction;        /* Number of sectors more we need in the proxy */
	int cps;               /* correction per invocation of drbd_rs_controller() */
	int steps;             /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - mdev->rs_planed;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(&mdev->rs_plan_s, cps);
	mdev->rs_planed += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
	mdev->rs_planed -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}
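
/* Worked example (illustrative, assuming SLEEP_TIME == HZ/10 as defined in
 * drbd_int.h): the controller then runs every 100ms, and steps equals
 * c_plan_ahead (both in 0.1s units), so c_plan_ahead == 20 plans 2 seconds
 * ahead in 20 steps.  On the normal path with c_fill_target == 0 the
 * formula above reduces to want = sect_in * c_delay_target, i.e. keep
 * c_delay_target ticks' worth of the observed incoming rate in flight;
 * the difference to what is already in flight or planned is then spread
 * evenly over all steps via fifo_add_val(). */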
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
	int number;

	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = mdev->sync_conf.rate;
		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to incoming reply rate soon enough anyways. */
	return number;
}
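
/* Example for the fixed-rate branch (illustrative numbers): with
 * BM_BLOCK_SIZE == 4096 and SLEEP_TIME == HZ/10, the formula reduces to
 * number = c_sync_rate / 40, so a configured rate of 4000 KiB/s yields
 * 100 requests of 4 KiB per 100ms tick -- i.e. 4000 KiB/s. */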
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel)
{
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 1;

	if (mdev->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(mdev);
		return 1;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync a
		   get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
		   to continue resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 1;
	}

	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
	number = drbd_rs_number_requests(mdev);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests, when half of the send buffer is filled */
		mutex_lock(&mdev->data.mutex);
		if (mdev->data.socket) {
			queued = mdev->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			put_ldev(mdev);
			return 1;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return 0;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			inc_rs_pending(mdev);
			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
					       sector, size, ID_SYNCER)) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return 0;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(mdev);
		return 1;
	}

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 1;
}
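
/* Worked example for the merge loop above (illustrative): with
 * BM_BLOCK_SIZE == 4 KiB, a long run of dirty bits starting at sector 64
 * (a 32 KiB boundary) grows 4k -> 8k -> 16k -> 32k; once align reaches 4,
 * the check (sector & ((1 << (align+3)) - 1)) fires, because sector 64 is
 * not 64 KiB aligned, and caps the request at 32 KiB.  Requests therefore
 * grow in powers of two and stay naturally aligned to their own size. */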
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(mdev);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity) {
			return 1;
		}

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (!drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}
void start_resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
}
int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
		dev_warn(DEV, "w_start_resync later...\n");
		mdev->start_resync_timer.expires = jiffies + HZ/10;
		add_timer(&mdev->start_resync_timer);
		return 1;
	}

	drbd_start_resync(mdev, C_SYNC_SOURCE);
	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
	return 1;
}
int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);
	ov_oos_print(mdev);
	drbd_resync_finished(mdev);

	return 1;
}
static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);

	drbd_resync_finished(mdev);

	return 1;
}
static void ping_peer(struct drbd_conf *mdev)
{
	clear_bit(GOT_PING_ACK, &mdev->flags);
	request_ping(mdev);
	wait_event(mdev->misc_wait,
		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(mdev)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * is not finished by now). Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			drbd_queue_work(&mdev->data.work, w);
			return 1;
		}
		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;
	db = mdev->rs_total;
	dbdt = Bit2KB(db/dt);
	mdev->rs_paused /= HZ;

	if (!get_ldev(mdev))
		goto out;

	ping_peer(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;

	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     verify_done ? "Online verify " : "Resync",
	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(mdev);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - mdev->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (mdev->csums_tfm && mdev->rs_total) {
			const unsigned long s = mdev->rs_same_csum;
			const unsigned long t = mdev->rs_total;
			const int ratio =
				(t == 0)     ? 0 :
				(t < 100000) ? ((s*100)/t) : (s/(t/100));
			dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total));
		}
	}

	if (mdev->rs_failed) {
		dev_info(DEV, "            %lu failed blocks\n", mdev->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (mdev->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
			}
		}

		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
			/* for verify runs, we don't update uuids here,
			 * so there would be nothing to report. */
			drbd_uuid_set_bm(mdev, 0UL);
			drbd_print_uuids(mdev, "updated UUIDs");
			if (mdev->p_uuid) {
				/* Now the two UUID sets are equal, update what we
				 * know of the peer. */
				int i;
				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
					mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
			}
		}
	}

	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&mdev->req_lock);
	put_ldev(mdev);
out:
	mdev->rs_total  = 0;
	mdev->rs_failed = 0;
	mdev->rs_paused = 0;
	if (verify_done)
		mdev->ov_start_sector = 0;

	drbd_md_sync(mdev);

	if (khelper_cmd)
		drbd_khelper(mdev, khelper_cmd);

	return 1;
}
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	if (drbd_ee_has_active_page(e)) {
		/* This might happen if sendpage() has not finished */
		int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
		atomic_add(i, &mdev->pp_in_use_by_net);
		atomic_sub(i, &mdev->pp_in_use);
		spin_lock_irq(&mdev->req_lock);
		list_add_tail(&e->w.list, &mdev->net_ee);
		spin_unlock_irq(&mdev->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_ee(mdev, e);
}
/**
 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)e->sector);

		ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}
/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	if (mdev->state.conn == C_AHEAD) {
		ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
	} else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(mdev);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			ok = 1;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)e->sector);

		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);

		/* update resync data with failure */
		drbd_rs_failed_io(mdev, e->sector, e->size);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	di = e->digest;

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (mdev->csums_tfm) {
			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(mdev, e->sector, e->size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
		} else {
			inc_rs_pending(mdev);
			e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
			kfree(di);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return ok;
}
/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	sector_t sector = e->sector;
	unsigned int size = e->size;
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (!digest) {
		ok = 0;	/* terminate the connection in case the allocation failed */
		goto out;
	}

	if (likely(!(e->flags & EE_WAS_ERROR)))
		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
	else
		memset(digest, 0, digest_size);

	/* Free e and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, e);
	e = NULL;
	inc_rs_pending(mdev);
	ok = drbd_send_drequest_csum(mdev, sector, size,
				     digest, digest_size,
				     P_OV_REPLY);
	if (!ok)
		dec_rs_pending(mdev);
	kfree(digest);

out:
	if (e)
		drbd_free_ee(mdev, e);
	dec_unacked(mdev);
	return ok;
}
void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
		mdev->ov_last_oos_size += size>>9;
	} else {
		mdev->ov_last_oos_start = sector;
		mdev->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(mdev, sector, size);
}
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	struct digest_info *di;
	void *digest;
	sector_t sector = e->sector;
	unsigned int size = e->size;
	int digest_size;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	di = e->digest;

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	}

	/* Free e and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, e);
	if (!eq)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	dec_unacked(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		ov_oos_print(mdev);
		drbd_resync_finished(mdev);
	}

	return ok;
}
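
/* Arithmetic behind the 0x200 test above (informational): ov_left counts
 * 4 KiB bitmap blocks, so bit 9 (0x200) corresponds to 512 blocks = 2 MiB;
 * it toggles once per 2 MiB of verify progress, which makes the rs marks
 * advance during alternating 2 MiB windows -- "every other megabyte". */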
int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);

	complete(&b->done);
	return 1;
}
int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
	struct p_barrier *p = &mdev->data.sbuf.barrier;
	int ok = 1;

	/* really avoid racing with tl_clear.  w.cb may have been referenced
	 * just before it was reassigned and re-queued, so double check that.
	 * actually, this race was harmless, since we only try to send the
	 * barrier packet here, and otherwise do nothing with the object.
	 * but compare with the head of w_clear_epoch */
	spin_lock_irq(&mdev->req_lock);
	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
		cancel = 1;
	spin_unlock_irq(&mdev->req_lock);
	if (cancel)
		return 1;

	if (!drbd_get_data_sock(mdev))
		return 0;
	p->barrier = b->br_number;
	/* inc_ap_pending was done where this was queued.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in w_clear_epoch.  */
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
			    (struct p_header80 *)p, sizeof(*p), 0);
	drbd_put_data_sock(mdev);

	return ok;
}
int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (cancel)
		return 1;
	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}
int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_oos(mdev, req);
	req_mod(req, oos_handed_to_network);

	return ok;
}
/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_dblock(mdev, req);
	req_mod(req, ok ? handed_over_to_network : send_failed);

	return ok;
}
/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
				(unsigned long)req);

	if (!ok) {
		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
		 * so this is probably redundant */
		if (mdev->state.conn >= C_CONNECTED)
			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	req_mod(req, ok ? handed_over_to_network : send_failed);

	return ok;
}
int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_begin_io(mdev, req->sector);
	/* Calling drbd_al_begin_io() out of the worker might deadlock
	   theoretically. Practically it can not deadlock, since this is
	   only used when unfreezing IOs. All the extents of the requests
	   that made it into the TL are already active */

	drbd_req_make_private_bio(req, req->master_bio);
	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
	generic_make_request(req->private_bio);

	return 1;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
	struct drbd_conf *odev = mdev;

	while (1) {
		if (odev->sync_conf.after == -1)
			return 1;
		odev = minor_to_mdev(odev->sync_conf.after);
		ERR_IF(!odev) return 1;
		if ((odev->state.conn >= C_SYNC_SOURCE &&
		     odev->state.conn <= C_PAUSED_SYNC_T) ||
		    odev->state.aftr_isp || odev->state.peer_isp ||
		    odev->state.user_isp)
			return 0;
	}
}
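
/* Example (illustrative): with sync-after configured as
 *   minor 2 after minor 1, minor 1 after minor 0, minor 0 after -1,
 * a call for minor 2 walks its dependencies 1 -> 0 and returns 0 as soon
 * as one of them is between C_SYNC_SOURCE and C_PAUSED_SYNC_T or has one
 * of the *_isp suspend flags set; otherwise the chain ends at -1 with 1. */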
/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (!_drbd_may_sync_now(odev))
			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
			       != SS_NOTHING_TO_DO);
	}

	return rv;
}
/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (odev->state.aftr_isp) {
			if (_drbd_may_sync_now(odev))
				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
							CS_HARD, NULL)
				       != SS_NOTHING_TO_DO) ;
		}
	}
	return rv;
}
void resume_next_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_resume_next(mdev);
	write_unlock_irq(&global_state_lock);
}

void suspend_other_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_pause_after(mdev);
	write_unlock_irq(&global_state_lock);
}
static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
	struct drbd_conf *odev;

	if (o_minor == -1)
		return NO_ERROR;
	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
		return ERR_SYNC_AFTER;

	/* check for loops */
	odev = minor_to_mdev(o_minor);
	while (1) {
		if (odev == mdev)
			return ERR_SYNC_AFTER_CYCLE;

		/* dependency chain ends here, no cycles. */
		if (odev->sync_conf.after == -1)
			return NO_ERROR;

		/* follow the dependency chain */
		odev = minor_to_mdev(odev->sync_conf.after);
	}
}
int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
	int changes;
	int retcode;

	write_lock_irq(&global_state_lock);
	retcode = sync_after_error(mdev, na);
	if (retcode == NO_ERROR) {
		mdev->sync_conf.after = na;
		do {
			changes  = _drbd_pause_after(mdev);
			changes |= _drbd_resume_next(mdev);
		} while (changes);
	}
	write_unlock_irq(&global_state_lock);
	return retcode;
}
void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	mdev->rs_in_flight = 0;
	mdev->rs_planed = 0;
	spin_lock(&mdev->peer_seq_lock);
	fifo_set(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
}
/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 *
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
 */
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
	union drbd_state ns;
	int r;

	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
		dev_err(DEV, "Resync already running!\n");
		return;
	}

	if (side == C_SYNC_TARGET) {
		/* Since application IO was locked out during C_WF_BITMAP_T and
		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
		   we check that we might make the data inconsistent. */
		r = drbd_khelper(mdev, "before-resync-target");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			dev_info(DEV, "before-resync-target handler returned %d, "
				 "dropping connection.\n", r);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			return;
		}
	} else /* C_SYNC_SOURCE */ {
		r = drbd_khelper(mdev, "before-resync-source");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			if (r == 3) {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "ignoring. Old userland tools?", r);
			} else {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "dropping connection.\n", r);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		}
	}

	drbd_state_lock(mdev);
	write_lock_irq(&global_state_lock);
	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
		write_unlock_irq(&global_state_lock);
		drbd_state_unlock(mdev);
		return;
	}

	ns.i = mdev->state.i;

	ns.aftr_isp = !_drbd_may_sync_now(mdev);

	ns.conn = side;

	if (side == C_SYNC_TARGET)
		ns.disk = D_INCONSISTENT;
	else /* side == C_SYNC_SOURCE */
		ns.pdsk = D_INCONSISTENT;

	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;

	if (ns.conn < C_CONNECTED)
		r = SS_UNKNOWN_ERROR;

	if (r == SS_SUCCESS) {
		unsigned long tw = drbd_bm_total_weight(mdev);
		unsigned long now = jiffies;
		int i;

		mdev->rs_failed    = 0;
		mdev->rs_paused    = 0;
		mdev->rs_same_csum = 0;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->rs_total     = tw;
		mdev->rs_start     = now;
		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = tw;
			mdev->rs_mark_time[i] = now;
		}
		_drbd_pause_after(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (r == SS_SUCCESS) {
		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
		     drbd_conn_str(ns.conn),
		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
		     (unsigned long) mdev->rs_total);
		if (side == C_SYNC_TARGET)
			mdev->bm_resync_fo = 0;

		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
		 * with w_send_oos, or the sync target will get confused as to
		 * how much bits to resync.  We cannot do that always, because for an
		 * empty resync and protocol < 95, we need to do it here, as we call
		 * drbd_resync_finished from here in that case.
		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
		 * and from after_state_ch otherwise. */
		if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
			drbd_gen_and_send_sync_uuid(mdev);

		if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
			/* This still has a race (about when exactly the peers
			 * detect connection loss) that can lead to a full sync
			 * on next handshake. In 8.3.9 we fixed this with explicit
			 * resync-finished notifications, but the fix
			 * introduces a protocol change.  Sleeping for some
			 * time longer than the ping interval + timeout on the
			 * SyncSource, to give the SyncTarget the chance to
			 * detect connection loss, then waiting for a ping
			 * response (implicit in drbd_resync_finished) reduces
			 * the race considerably, but does not solve it. */
			if (side == C_SYNC_SOURCE)
				schedule_timeout_interruptible(
					mdev->net_conf->ping_int * HZ +
					mdev->net_conf->ping_timeo*HZ/9);
			drbd_resync_finished(mdev);
		}

		drbd_rs_controller_reset(mdev);
		/* ns.conn may already be != mdev->state.conn,
		 * we may have been paused in between, or become paused until
		 * the timer triggers.
		 * No matter, that is handled in resync_timer_fn() */
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);

		drbd_md_sync(mdev);
	}
	put_ldev(mdev);
	drbd_state_unlock(mdev);
}
int drbd_worker(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);
	int intr = 0, i;

	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);

		if (down_trylock(&mdev->data.work.s)) {
			mutex_lock(&mdev->data.mutex);
			if (mdev->data.socket && !mdev->net_conf->no_cork)
				drbd_tcp_uncork(mdev->data.socket);
			mutex_unlock(&mdev->data.mutex);

			intr = down_interruptible(&mdev->data.work.s);

			mutex_lock(&mdev->data.mutex);
			if (mdev->data.socket && !mdev->net_conf->no_cork)
				drbd_tcp_cork(mdev->data.socket);
			mutex_unlock(&mdev->data.mutex);
		}

		if (intr) {
			D_ASSERT(intr == -EINTR);
			flush_signals(current);
			ERR_IF (get_t_state(thi) == Running)
				continue;
			break;
		}

		if (get_t_state(thi) != Running)
			break;
		/* With this break, we have done a down() but not consumed
		   the entry from the list. The cleanup code takes care of
		   this...   */

		w = NULL;
		spin_lock_irq(&mdev->data.work.q_lock);
		ERR_IF(list_empty(&mdev->data.work.q)) {
			/* something terribly wrong in our logic.
			 * we were able to down() the semaphore,
			 * but the list is empty... doh.
			 *
			 * what is the best thing to do now?
			 * try again from scratch, restarting the receiver,
			 * asender, whatnot? could break even more ugly,
			 * e.g. when we are primary, but no good local data.
			 *
			 * I'll try to get away just starting over this loop.
			 */
			spin_unlock_irq(&mdev->data.work.q_lock);
			continue;
		}
		w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
		list_del_init(&w->list);
		spin_unlock_irq(&mdev->data.work.q_lock);

		if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
			/* dev_warn(DEV, "worker: a callback failed! \n"); */
			if (mdev->state.conn >= C_CONNECTED)
				drbd_force_state(mdev,
						NS(conn, C_NETWORK_FAILURE));
		}
	}
	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));

	spin_lock_irq(&mdev->data.work.q_lock);
	i = 0;
	while (!list_empty(&mdev->data.work.q)) {
		list_splice_init(&mdev->data.work.q, &work_list);
		spin_unlock_irq(&mdev->data.work.q_lock);

		while (!list_empty(&work_list)) {
			w = list_entry(work_list.next, struct drbd_work, list);
			list_del_init(&w->list);
			w->cb(mdev, w, 1);
			i++; /* dead debugging code */
		}

		spin_lock_irq(&mdev->data.work.q_lock);
	}
	sema_init(&mdev->data.work.s, 0);
	/* DANGEROUS race: if someone did queue his work within the spinlock,
	 * but up() ed outside the spinlock, we could get an up() on the
	 * semaphore without corresponding list entry.
	 * So don't do that.
	 */
	spin_unlock_irq(&mdev->data.work.q_lock);

	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
	/* _drbd_set_state only uses stop_nowait.
	 * wait here for the Exiting receiver. */
	drbd_thread_stop(&mdev->receiver);
	drbd_mdev_cleanup(mdev);

	dev_info(DEV, "worker terminated\n");

	clear_bit(DEVICE_DYING, &mdev->flags);
	clear_bit(CONFIG_PENDING, &mdev->flags);
	wake_up(&mdev->state_wait);

	return 0;
}