/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_conf *mdev,
                                 struct drbd_work *w, int cancel);

/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_endio_pri (defined here)
 *   drbd_endio_sec (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */

/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync-after dependencies, we grab a write lock, because
   we need stable states on all devices for that.  */
rwlock_t global_state_lock;
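
/* Editor's illustration (a sketch, not part of the driver): the intended
 * usage pattern of global_state_lock is roughly
 *
 *      read_lock(&global_state_lock);        -- one device changes its state
 *      ...modify that device's state...
 *      read_unlock(&global_state_lock);
 *
 *      write_lock_irq(&global_state_lock);   -- sync-after dependency walk;
 *      _drbd_pause_after(mdev);                 needs all device states stable
 *      write_unlock_irq(&global_state_lock);
 */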

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
        struct drbd_md_io *md_io;

        md_io = (struct drbd_md_io *)bio->bi_private;
        md_io->error = error;

        complete(&md_io->event);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
        unsigned long flags = 0;
        struct drbd_conf *mdev = peer_req->w.mdev;

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        mdev->read_cnt += peer_req->i.size >> 9;
        list_del(&peer_req->w.list);
        if (list_empty(&mdev->read_ee))
                wake_up(&mdev->ee_wait);
        if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
                __drbd_chk_io_error(mdev, false);
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
        put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
        unsigned long flags = 0;
        struct drbd_conf *mdev = peer_req->w.mdev;
        sector_t e_sector;
        int do_wake;
        u64 block_id;
        int do_al_complete_io;

        /* after we moved peer_req to done_ee,
         * we may no longer access it,
         * it may be freed/reused already!
         * (as soon as we release the req_lock) */
        e_sector = peer_req->i.sector;
        do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
        block_id = peer_req->block_id;

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        mdev->writ_cnt += peer_req->i.size >> 9;
        list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
        list_add_tail(&peer_req->w.list, &mdev->done_ee);

        /*
         * Do not remove from the write_requests tree here: we did not send the
         * Ack yet and did not wake possibly waiting conflicting requests.
         * Removal from the tree happens in "drbd_process_done_ee" within the
         * appropriate w.cb (e_end_block/e_end_resync_block) or in
         * _drbd_clear_done_ee.
         */

        do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);

        if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
                __drbd_chk_io_error(mdev, false);
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        if (block_id == ID_SYNCER)
                drbd_rs_complete_io(mdev, e_sector);

        if (do_wake)
                wake_up(&mdev->ee_wait);

        if (do_al_complete_io)
                drbd_al_complete_io(mdev, e_sector);

        wake_asender(mdev->tconn);
        put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_endio_sec(struct bio *bio, int error)
{
        struct drbd_peer_request *peer_req = bio->bi_private;
        struct drbd_conf *mdev = peer_req->w.mdev;
        int uptodate = bio_flagged(bio, BIO_UPTODATE);
        int is_write = bio_data_dir(bio) == WRITE;

        if (error && __ratelimit(&drbd_ratelimit_state))
                dev_warn(DEV, "%s: error=%d s=%llus\n",
                         is_write ? "write" : "read", error,
                         (unsigned long long)peer_req->i.sector);
        if (!error && !uptodate) {
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
                                 is_write ? "write" : "read",
                                 (unsigned long long)peer_req->i.sector);
                /* strange behavior of some lower level drivers...
                 * fail the request by clearing the uptodate flag,
                 * but do not return any error?! */
                error = -EIO;
        }

        if (error)
                set_bit(__EE_WAS_ERROR, &peer_req->flags);

        bio_put(bio); /* no need for the bio anymore */
        if (atomic_dec_and_test(&peer_req->pending_bios)) {
                if (is_write)
                        drbd_endio_write_sec_final(peer_req);
                else
                        drbd_endio_read_sec_final(peer_req);
        }
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
        unsigned long flags;
        struct drbd_request *req = bio->bi_private;
        struct drbd_conf *mdev = req->w.mdev;
        struct bio_and_error m;
        enum drbd_req_event what;
        int uptodate = bio_flagged(bio, BIO_UPTODATE);

        if (!error && !uptodate) {
                dev_warn(DEV, "p %s: setting error to -EIO\n",
                         bio_data_dir(bio) == WRITE ? "write" : "read");
                /* strange behavior of some lower level drivers...
                 * fail the request by clearing the uptodate flag,
                 * but do not return any error?! */
                error = -EIO;
        }

        /* to avoid recursion in __req_mod */
        if (unlikely(error)) {
                what = (bio_data_dir(bio) == WRITE)
                        ? WRITE_COMPLETED_WITH_ERROR
                        : (bio_rw(bio) == READ)
                          ? READ_COMPLETED_WITH_ERROR
                          : READ_AHEAD_COMPLETED_WITH_ERROR;
        } else
                what = COMPLETED_OK;

        bio_put(req->private_bio);
        req->private_bio = ERR_PTR(error);

        /* not req_mod(), we need irqsave here! */
        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        __req_mod(req, what, &m);
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        if (m.bio)
                complete_master_bio(mdev, &m);
}

int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_request *req = container_of(w, struct drbd_request, w);

        /* We should not detach for read io-error,
         * but try to WRITE the P_DATA_REPLY to the failed location,
         * to give the disk the chance to relocate that block */

        spin_lock_irq(&mdev->tconn->req_lock);
        if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
                _req_mod(req, READ_RETRY_REMOTE_CANCELED);
                spin_unlock_irq(&mdev->tconn->req_lock);
                return 1;
        }
        spin_unlock_irq(&mdev->tconn->req_lock);

        return w_send_read_req(mdev, w, 0);
}

void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
                  struct drbd_peer_request *peer_req, void *digest)
{
        struct hash_desc desc;
        struct scatterlist sg;
        struct page *page = peer_req->pages;
        struct page *tmp;
        unsigned len;

        desc.tfm = tfm;
        desc.flags = 0;

        sg_init_table(&sg, 1);
        crypto_hash_init(&desc);

        while ((tmp = page_chain_next(page))) {
                /* all but the last page will be fully used */
                sg_set_page(&sg, page, PAGE_SIZE, 0);
                crypto_hash_update(&desc, &sg, sg.length);
                page = tmp;
        }
        /* and now the last, possibly only partially used page */
        len = peer_req->i.size & (PAGE_SIZE - 1);
        sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
        crypto_hash_update(&desc, &sg, sg.length);
        crypto_hash_final(&desc, digest);
}
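
/* Note on the loop above: a single scatterlist entry is re-targeted at one
 * page per iteration, so the hash is fed page by page.  The "len ?: PAGE_SIZE"
 * idiom covers the case where i.size is an exact multiple of PAGE_SIZE: then
 * "i.size & (PAGE_SIZE - 1)" is 0, and the last page is in fact fully used.
 * Example: i.size == 8192 with 4K pages hashes two full pages; i.size == 6144
 * hashes one full page and then 2048 bytes of the last page.
 */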

void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
        struct hash_desc desc;
        struct scatterlist sg;
        struct bio_vec *bvec;
        int i;

        desc.tfm = tfm;
        desc.flags = 0;

        sg_init_table(&sg, 1);
        crypto_hash_init(&desc);

        __bio_for_each_segment(bvec, bio, i, 0) {
                sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
                crypto_hash_update(&desc, &sg, sg.length);
        }
        crypto_hash_final(&desc, digest);
}

/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_peer_request *peer_req =
                container_of(w, struct drbd_peer_request, w);
        int digest_size;
        void *digest;
        int ok = 1;

        if (unlikely(cancel))
                goto out;

        if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
                goto out;

        digest_size = crypto_hash_digestsize(mdev->csums_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (digest) {
                sector_t sector = peer_req->i.sector;
                unsigned int size = peer_req->i.size;
                drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
                /* Free peer_req and pages before send.
                 * In case we block on congestion, we could otherwise run into
                 * some distributed deadlock, if the other side blocks on
                 * congestion as well, because our receiver blocks in
                 * drbd_pp_alloc due to pp_in_use > max_buffers. */
                drbd_free_ee(mdev, peer_req);
                peer_req = NULL;
                inc_rs_pending(mdev);
                ok = drbd_send_drequest_csum(mdev, sector, size,
                                             digest, digest_size,
                                             P_CSUM_RS_REQUEST);
                kfree(digest);
        } else {
                dev_err(DEV, "kmalloc() of digest failed.\n");
                ok = 0;
        }

out:
        if (peer_req)
                drbd_free_ee(mdev, peer_req);

        if (unlikely(!ok))
                dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
        return ok;
}

#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
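
/* Editor's note: GFP_TRY deliberately omits __GFP_WAIT, so the allocation in
 * read_for_csum() below fails fast instead of blocking in reclaim, and
 * __GFP_NOWARN suppresses the allocation-failure warning.  A failed attempt
 * is harmless here: the caller defers, and the resync request is retried later.
 */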

static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
        struct drbd_peer_request *peer_req;

        if (!get_ldev(mdev))
                return -EIO;

        if (drbd_rs_should_slow_down(mdev, sector))
                goto defer;

        /* GFP_TRY, because if there is no memory available right now, this may
         * be rescheduled for later. It is "only" background resync, after all. */
        peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
        if (!peer_req)
                goto defer;

        peer_req->w.cb = w_e_send_csum;
        spin_lock_irq(&mdev->tconn->req_lock);
        list_add(&peer_req->w.list, &mdev->read_ee);
        spin_unlock_irq(&mdev->tconn->req_lock);

        atomic_add(size >> 9, &mdev->rs_sect_ev);
        if (drbd_submit_ee(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
                return 0;

        /* If it failed because of ENOMEM, retry should help.  If it failed
         * because bio_add_page failed (probably broken lower level driver),
         * retry may or may not help.
         * If it does not, you may need to force disconnect. */
        spin_lock_irq(&mdev->tconn->req_lock);
        list_del(&peer_req->w.list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        drbd_free_ee(mdev, peer_req);
defer:
        put_ldev(mdev);
        return -EAGAIN;
}

int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        switch (mdev->state.conn) {
        case C_VERIFY_S:
                w_make_ov_request(mdev, w, cancel);
                break;
        case C_SYNC_TARGET:
                w_make_resync_request(mdev, w, cancel);
                break;
        }

        return 1;
}

void resync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        if (list_empty(&mdev->resync_work.list))
                drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
}

static void fifo_set(struct fifo_buffer *fb, int value)
{
        int i;

        for (i = 0; i < fb->size; i++)
                fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
        int ov;

        ov = fb->values[fb->head_index];
        fb->values[fb->head_index++] = value;

        if (fb->head_index >= fb->size)
                fb->head_index = 0;

        return ov;
}
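
/* Worked example (illustrative only): with size == 3, values {5, 7, 2} and
 * head_index == 0, fifo_push(fb, 0) returns 5 (the oldest planned value),
 * stores 0 in its slot, and advances head_index to 1 -- the buffer acts as a
 * fixed-length planning pipeline: one value falls due per step while a zero
 * is queued at the far end.
 */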

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
        int i;

        for (i = 0; i < fb->size; i++)
                fb->values[i] += value;
}

static int drbd_rs_controller(struct drbd_conf *mdev)
{
        unsigned int sect_in;  /* Number of sectors that came in since the last turn */
        unsigned int want;     /* The number of sectors we want in the proxy */
        int req_sect;          /* Number of sectors to request in this turn */
        int correction;        /* Number of sectors more we need in the proxy */
        int cps;               /* correction per invocation of drbd_rs_controller() */
        int steps;             /* Number of time steps to plan ahead */
        int curr_corr;
        int max_sect;

        sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
        mdev->rs_in_flight -= sect_in;

        spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

        steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

        if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
                want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
        } else { /* normal path */
                want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
                        sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
        }

        correction = want - mdev->rs_in_flight - mdev->rs_planed;

        /* Plan ahead */
        cps = correction / steps;
        fifo_add_val(&mdev->rs_plan_s, cps);
        mdev->rs_planed += cps * steps;

        /* What we do in this step */
        curr_corr = fifo_push(&mdev->rs_plan_s, 0);
        spin_unlock(&mdev->peer_seq_lock);
        mdev->rs_planed -= curr_corr;

        req_sect = sect_in + curr_corr;
        if (req_sect < 0)
                req_sect = 0;

        max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
        if (req_sect > max_sect)
                req_sect = max_sect;

        /*
        dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
                 sect_in, mdev->rs_in_flight, want, correction,
                 steps, cps, mdev->rs_planed, curr_corr, req_sect);
        */

        return req_sect;
}
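
/* Worked example for the controller above (assuming SLEEP_TIME == HZ/10,
 * i.e. one invocation per 100ms): with c_delay_target == 10 (one second),
 * no c_fill_target, and 2000 sectors of replies in the last step, we get
 * want = 2000 * 10 * HZ / (SLEEP_TIME * 10) = 20000 sectors in flight.
 * If rs_in_flight + rs_planed already cover 18000 sectors, correction is
 * 2000; with steps == 10, fifo_add_val() spreads cps == 200 over every
 * planning slot, and the fifo_push() releases this step's share as curr_corr.
 */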

static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
        int number;
        if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
                number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
                mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
        } else {
                mdev->c_sync_rate = mdev->sync_conf.rate;
                number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
        }

        /* ignore the number of pending requests; the resync controller should
         * throttle down to the incoming reply rate soon enough anyway. */
        return number;
}
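
/* Unit check (illustrative, assuming BM_BLOCK_SIZE == 4096 and SLEEP_TIME ==
 * HZ/10): the controller returns sectors, and ">> (BM_BLOCK_SHIFT - 9)"
 * converts 512-byte sectors into 4K bitmap blocks (divide by 8).  In the
 * fixed-rate branch, rate == 10000 KB/s gives
 * number = (HZ/10) * 10000 / (4 * HZ) = 250 requests per 100ms cycle,
 * i.e. 250 * 4K = 1000 KB per cycle.
 */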

static int w_make_resync_request(struct drbd_conf *mdev,
                                 struct drbd_work *w, int cancel)
{
        unsigned long bit;
        sector_t sector;
        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
        int max_bio_size;
        int number, rollback_i, size;
        int align, queued, sndbuf;
        int i = 0;

        if (unlikely(cancel))
                return 1;

        if (mdev->rs_total == 0) {
                /* empty resync? */
                drbd_resync_finished(mdev);
                return 1;
        }

        if (!get_ldev(mdev)) {
                /* Since we only need to access mdev->rsync, a
                   get_ldev_if_state(mdev, D_FAILED) would be sufficient; but
                   continuing resync with a broken disk makes no sense at
                   all */
                dev_err(DEV, "Disk broke down during resync!\n");
                return 1;
        }

        max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
        number = drbd_rs_number_requests(mdev);
        if (number == 0)
                goto requeue;

        for (i = 0; i < number; i++) {
                /* Stop generating RS requests when half of the send buffer is filled */
                mutex_lock(&mdev->tconn->data.mutex);
                if (mdev->tconn->data.socket) {
                        queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
                        sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
                } else {
                        queued = 1;
                        sndbuf = 0;
                }
                mutex_unlock(&mdev->tconn->data.mutex);
                if (queued > sndbuf / 2)
                        goto requeue;

next_sector:
                size = BM_BLOCK_SIZE;
                bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

                if (bit == DRBD_END_OF_BITMAP) {
                        mdev->bm_resync_fo = drbd_bm_bits(mdev);
                        put_ldev(mdev);
                        return 1;
                }

                sector = BM_BIT_TO_SECT(bit);

                if (drbd_rs_should_slow_down(mdev, sector) ||
                    drbd_try_rs_begin_io(mdev, sector)) {
                        mdev->bm_resync_fo = bit;
                        goto requeue;
                }
                mdev->bm_resync_fo = bit + 1;

                if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
                        drbd_rs_complete_io(mdev, sector);
                        goto next_sector;
                }

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
                /* try to find some adjacent bits.
                 * we stop if we have already the maximum req size.
                 *
                 * Additionally always align bigger requests, in order to
                 * be prepared for all stripe sizes of software RAIDs.
                 */
                align = 1;
                rollback_i = i;
                for (;;) {
                        if (size + BM_BLOCK_SIZE > max_bio_size)
                                break;

                        /* Be always aligned */
                        if (sector & ((1<<(align+3))-1))
                                break;

                        /* do not cross extent boundaries */
                        if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
                                break;
                        /* now, is it actually dirty, after all?
                         * caution, drbd_bm_test_bit is tri-state for some
                         * obscure reason; ( b == 0 ) would get the out-of-band
                         * only accidentally right because of the "oddly sized"
                         * adjustment below */
                        if (drbd_bm_test_bit(mdev, bit+1) != 1)
                                break;
                        bit++;
                        size += BM_BLOCK_SIZE;
                        if ((BM_BLOCK_SIZE << align) <= size)
                                align++;
                        i++;
                }
                /* if we merged some,
                 * reset the offset to start the next drbd_bm_find_next from */
                if (size > BM_BLOCK_SIZE)
                        mdev->bm_resync_fo = bit + 1;
#endif
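
                /* Editor's note on the merge loop above: the test
                 * "sector & ((1<<(align+3))-1)" only lets a request grow while
                 * its start sector is aligned to the size it is about to reach
                 * (BM_BLOCK_SIZE is 8 sectors, hence the +3).  Example: a
                 * request starting at an 8K-but-not-16K aligned sector (say
                 * sector 16) passes the check once (16 & 15 == 0), grows to
                 * 8K, then stops (16 & 31 != 0) -- so merged requests stay
                 * naturally aligned, which suits the stripe sizes of software
                 * RAIDs mentioned above. */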

                /* adjust very last sectors, in case we are oddly sized */
                if (sector + (size>>9) > capacity)
                        size = (capacity-sector)<<9;
                if (mdev->tconn->agreed_pro_version >= 89 && mdev->csums_tfm) {
                        switch (read_for_csum(mdev, sector, size)) {
                        case -EIO: /* Disk failure */
                                put_ldev(mdev);
                                return 0;
                        case -EAGAIN: /* allocation failed, or ldev busy */
                                drbd_rs_complete_io(mdev, sector);
                                mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
                                i = rollback_i;
                                goto requeue;
                        case 0:
                                /* everything ok */
                                break;
                        default:
                                BUG();
                        }
                } else {
                        inc_rs_pending(mdev);
                        if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
                                                sector, size, ID_SYNCER)) {
                                dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
                                dec_rs_pending(mdev);
                                put_ldev(mdev);
                                return 0;
                        }
                }
        }

        if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
                /* last syncer _request_ was sent,
                 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
                 * next sync group will resume), as soon as we receive the last
                 * resync data block, and the last bit is cleared.
                 * until then resync "work" is "inactive" ...
                 */
                put_ldev(mdev);
                return 1;
        }

requeue:
        mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
        mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
        put_ldev(mdev);
        return 1;
}

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        int number, i, size;
        sector_t sector;
        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

        if (unlikely(cancel))
                return 1;

        number = drbd_rs_number_requests(mdev);

        sector = mdev->ov_position;
        for (i = 0; i < number; i++) {
                if (sector >= capacity) {
                        return 1;
                }

                size = BM_BLOCK_SIZE;

                if (drbd_rs_should_slow_down(mdev, sector) ||
                    drbd_try_rs_begin_io(mdev, sector)) {
                        mdev->ov_position = sector;
                        goto requeue;
                }

                if (sector + (size>>9) > capacity)
                        size = (capacity-sector)<<9;

                inc_rs_pending(mdev);
                if (!drbd_send_ov_request(mdev, sector, size)) {
                        dec_rs_pending(mdev);
                        return 0;
                }
                sector += BM_SECT_PER_BIT;
        }
        mdev->ov_position = sector;

requeue:
        mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
        mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
        return 1;
}

int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        kfree(w);
        ov_oos_print(mdev);
        drbd_resync_finished(mdev);

        return 1;
}

static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        kfree(w);

        drbd_resync_finished(mdev);

        return 1;
}

static void ping_peer(struct drbd_conf *mdev)
{
        clear_bit(GOT_PING_ACK, &mdev->flags);
        request_ping(mdev->tconn);
        wait_event(mdev->misc_wait,
                   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_conf *mdev)
{
        unsigned long db, dt, dbdt;
        unsigned long n_oos;
        union drbd_state os, ns;
        struct drbd_work *w;
        char *khelper_cmd = NULL;
        int verify_done = 0;

        /* Remove all elements from the resync LRU.  Since future actions
         * might set bits in the (main) bitmap, then the entries in the
         * resync LRU would be wrong. */
        if (drbd_rs_del_all(mdev)) {
                /* In case this is not possible now, most probably because
                 * there are P_RS_DATA_REPLY packets lingering on the worker's
                 * queue (or even the read operations for those packets
                 * are not finished by now).  Retry in 100ms. */

                schedule_timeout_interruptible(HZ / 10);
                w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
                if (w) {
                        w->cb = w_resync_finished;
                        drbd_queue_work(&mdev->tconn->data.work, w);
                        return 1;
                }
                dev_err(DEV, "Warning: both drbd_rs_del_all() and kmalloc(w) failed.\n");
        }

        dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
        if (dt <= 0)
                dt = 1;
        db = mdev->rs_total;
        dbdt = Bit2KB(db/dt);
        mdev->rs_paused /= HZ;

        if (!get_ldev(mdev))
                goto out;

        ping_peer(mdev);

        spin_lock_irq(&mdev->tconn->req_lock);
        os = mdev->state;

        verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

        /* This protects us against multiple calls (that can happen in the presence
           of application IO), and against connectivity loss just before we arrive here. */
        if (os.conn <= C_CONNECTED)
                goto out_unlock;

        ns = os;
        ns.conn = C_CONNECTED;

        dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
                 verify_done ? "Online verify " : "Resync",
                 dt + mdev->rs_paused, mdev->rs_paused, dbdt);

        n_oos = drbd_bm_total_weight(mdev);

        if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
                if (n_oos) {
                        dev_alert(DEV, "Online verify found %lu %dk blocks out of sync!\n",
                                  n_oos, Bit2KB(1));
                        khelper_cmd = "out-of-sync";
                }
        } else {
                D_ASSERT((n_oos - mdev->rs_failed) == 0);

                if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
                        khelper_cmd = "after-resync-target";

                if (mdev->csums_tfm && mdev->rs_total) {
                        const unsigned long s = mdev->rs_same_csum;
                        const unsigned long t = mdev->rs_total;
                        const int ratio =
                                (t == 0)     ? 0 :
                                (t < 100000) ? ((s*100)/t) : (s/(t/100));
                        dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
                                 "transferred %luK total %luK\n",
                                 ratio,
                                 Bit2KB(mdev->rs_same_csum),
                                 Bit2KB(mdev->rs_total - mdev->rs_same_csum),
                                 Bit2KB(mdev->rs_total));
                }
        }

        if (mdev->rs_failed) {
                dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);

                if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
                        ns.disk = D_INCONSISTENT;
                        ns.pdsk = D_UP_TO_DATE;
                } else {
                        ns.disk = D_UP_TO_DATE;
                        ns.pdsk = D_INCONSISTENT;
                }
        } else {
                ns.disk = D_UP_TO_DATE;
                ns.pdsk = D_UP_TO_DATE;

                if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
                        if (mdev->p_uuid) {
                                int i;
                                for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
                                        _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
                                drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
                                _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
                        } else {
                                dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
                        }
                }

                if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
                        /* for verify runs, we don't update uuids here,
                         * so there would be nothing to report. */
                        drbd_uuid_set_bm(mdev, 0UL);
                        drbd_print_uuids(mdev, "updated UUIDs");
                        if (mdev->p_uuid) {
                                /* Now the two UUID sets are equal, update what we
                                 * know of the peer. */
                                int i;
                                for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
                                        mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
                        }
                }
        }

        _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
        spin_unlock_irq(&mdev->tconn->req_lock);
        put_ldev(mdev);
out:
        mdev->rs_total  = 0;
        mdev->rs_failed = 0;
        mdev->rs_paused = 0;
        if (verify_done)
                mdev->ov_start_sector = 0;

        drbd_md_sync(mdev);

        if (khelper_cmd)
                drbd_khelper(mdev, khelper_cmd);

        return 1;
}

/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
        if (drbd_ee_has_active_page(peer_req)) {
                /* This might happen if sendpage() has not finished */
                int i = (peer_req->i.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                atomic_add(i, &mdev->pp_in_use_by_net);
                atomic_sub(i, &mdev->pp_in_use);
                spin_lock_irq(&mdev->tconn->req_lock);
                list_add_tail(&peer_req->w.list, &mdev->net_ee);
                spin_unlock_irq(&mdev->tconn->req_lock);
                wake_up(&drbd_pp_wait);
        } else
                drbd_free_ee(mdev, peer_req);
}
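
/* Editor's note: pages handed to the network layer via sendpage() may still
 * be referenced by the TCP stack here, so they cannot be recycled yet.  The
 * bookkeeping above moves them from pp_in_use to pp_in_use_by_net and parks
 * the request on net_ee; the wake_up(&drbd_pp_wait) lets allocators waiting
 * on the page pool re-check their limits.
 */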

/**
 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:       DRBD device.
 * @w:          work object.
 * @cancel:     The connection will be closed anyway
 */
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        int ok;

        if (unlikely(cancel)) {
                drbd_free_ee(mdev, peer_req);
                dec_unacked(mdev);
                return 1;
        }

        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
        } else {
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
                                (unsigned long long)peer_req->i.sector);

                ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
        }

        dec_unacked(mdev);

        move_to_net_ee_or_free(mdev, peer_req);

        if (unlikely(!ok))
                dev_err(DEV, "drbd_send_block() failed\n");
        return ok;
}

/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:       DRBD device.
 * @w:          work object.
 * @cancel:     The connection will be closed anyway
 */
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        int ok;

        if (unlikely(cancel)) {
                drbd_free_ee(mdev, peer_req);
                dec_unacked(mdev);
                return 1;
        }

        if (get_ldev_if_state(mdev, D_FAILED)) {
                drbd_rs_complete_io(mdev, peer_req->i.sector);
                put_ldev(mdev);
        }

        if (mdev->state.conn == C_AHEAD) {
                ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
        } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
                        inc_rs_pending(mdev);
                        ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
                } else {
                        if (__ratelimit(&drbd_ratelimit_state))
                                dev_err(DEV, "Not sending RSDataReply, "
                                        "partner DISKLESS!\n");
                        ok = 1;
                }
        } else {
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
                                (unsigned long long)peer_req->i.sector);

                ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);

                /* update resync data with failure */
                drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
        }

        dec_unacked(mdev);

        move_to_net_ee_or_free(mdev, peer_req);

        if (unlikely(!ok))
                dev_err(DEV, "drbd_send_block() failed\n");
        return ok;
}

int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        struct digest_info *di;
        int digest_size;
        void *digest = NULL;
        int ok, eq = 0;

        if (unlikely(cancel)) {
                drbd_free_ee(mdev, peer_req);
                dec_unacked(mdev);
                return 1;
        }

        if (get_ldev(mdev)) {
                drbd_rs_complete_io(mdev, peer_req->i.sector);
                put_ldev(mdev);
        }

        di = peer_req->digest;

        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                /* quick hack to try to avoid a race against reconfiguration.
                 * a real fix would be much more involved,
                 * introducing more locking mechanisms */
                if (mdev->csums_tfm) {
                        digest_size = crypto_hash_digestsize(mdev->csums_tfm);
                        D_ASSERT(digest_size == di->digest_size);
                        digest = kmalloc(digest_size, GFP_NOIO);
                }
                if (digest) {
                        drbd_csum_ee(mdev, mdev->csums_tfm, peer_req, digest);
                        eq = !memcmp(digest, di->digest, digest_size);
                        kfree(digest);
                }

                if (eq) {
                        drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
                        /* rs_same_csums unit is BM_BLOCK_SIZE */
                        mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
                        ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
                } else {
                        inc_rs_pending(mdev);
                        peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
                        peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
                        kfree(di);
                        ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
                }
        } else {
                ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
        }

        dec_unacked(mdev);
        move_to_net_ee_or_free(mdev, peer_req);

        if (unlikely(!ok))
                dev_err(DEV, "drbd_send_block/ack() failed\n");
        return ok;
}
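
/* Editor's note: the function above is the bandwidth saving of checksum-based
 * resync -- when the digests match, only the small P_RS_IS_IN_SYNC ack crosses
 * the wire instead of a full 4K (or larger) P_RS_DATA_REPLY block, and the
 * block is marked in sync locally.  Only on a mismatch is the full data sent,
 * after invalidating the digest so the peer request can be reused as a plain
 * reply.
 */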

/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        sector_t sector = peer_req->i.sector;
        unsigned int size = peer_req->i.size;
        int digest_size;
        void *digest;
        int ok = 1;

        if (unlikely(cancel))
                goto out;

        digest_size = crypto_hash_digestsize(mdev->verify_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (!digest) {
                ok = 0; /* terminate the connection in case the allocation failed */
                goto out;
        }

        if (likely(!(peer_req->flags & EE_WAS_ERROR)))
                drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);
        else
                memset(digest, 0, digest_size);

        /* Free peer_req and pages before send.
         * In case we block on congestion, we could otherwise run into
         * some distributed deadlock, if the other side blocks on
         * congestion as well, because our receiver blocks in
         * drbd_pp_alloc due to pp_in_use > max_buffers. */
        drbd_free_ee(mdev, peer_req);
        peer_req = NULL;
        inc_rs_pending(mdev);
        ok = drbd_send_drequest_csum(mdev, sector, size,
                                     digest, digest_size,
                                     P_OV_REPLY);
        if (!ok)
                dec_rs_pending(mdev);
        kfree(digest);

out:
        if (peer_req)
                drbd_free_ee(mdev, peer_req);
        dec_unacked(mdev);
        return ok;
}

void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
        if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
                mdev->ov_last_oos_size += size>>9;
        } else {
                mdev->ov_last_oos_start = sector;
                mdev->ov_last_oos_size = size>>9;
        }
        drbd_set_out_of_sync(mdev, sector, size);
}
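
/* Example of the run merging above: out-of-sync 4K blocks reported at sector
 * 1000 and then 1008 are merged (1000 + (4096>>9) == 1008), so
 * ov_last_oos_size grows to 16 sectors and ov_oos_print() later reports one
 * contiguous run instead of two.
 */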

int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
        struct digest_info *di;
        void *digest;
        sector_t sector = peer_req->i.sector;
        unsigned int size = peer_req->i.size;
        int digest_size;
        int ok, eq = 0;

        if (unlikely(cancel)) {
                drbd_free_ee(mdev, peer_req);
                dec_unacked(mdev);
                return 1;
        }

        /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
         * the resync lru has been cleaned up already */
        if (get_ldev(mdev)) {
                drbd_rs_complete_io(mdev, peer_req->i.sector);
                put_ldev(mdev);
        }

        di = peer_req->digest;

        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
                digest_size = crypto_hash_digestsize(mdev->verify_tfm);
                digest = kmalloc(digest_size, GFP_NOIO);
                if (digest) {
                        drbd_csum_ee(mdev, mdev->verify_tfm, peer_req, digest);

                        D_ASSERT(digest_size == di->digest_size);
                        eq = !memcmp(digest, di->digest, digest_size);
                        kfree(digest);
                }
        }

        /* Free peer_req and pages before send.
         * In case we block on congestion, we could otherwise run into
         * some distributed deadlock, if the other side blocks on
         * congestion as well, because our receiver blocks in
         * drbd_pp_alloc due to pp_in_use > max_buffers. */
        drbd_free_ee(mdev, peer_req);
        if (!eq)
                drbd_ov_oos_found(mdev, sector, size);
        else
                ov_oos_print(mdev);

        ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
                              eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

        dec_unacked(mdev);

        --mdev->ov_left;

        /* let's advance progress step marks only for every other megabyte */
        if ((mdev->ov_left & 0x200) == 0x200)
                drbd_advance_rs_marks(mdev, mdev->ov_left);

        if (mdev->ov_left == 0) {
                ov_oos_print(mdev);
                drbd_resync_finished(mdev);
        }

        return ok;
}

int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
        complete(&b->done);
        return 1;
}

int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
        struct p_barrier *p = &mdev->tconn->data.sbuf.barrier;
        int ok = 1;

        /* really avoid racing with tl_clear.  w.cb may have been referenced
         * just before it was reassigned and re-queued, so double check that.
         * actually, this race was harmless, since we only try to send the
         * barrier packet here, and otherwise do nothing with the object.
         * but compare with the head of w_clear_epoch */
        spin_lock_irq(&mdev->tconn->req_lock);
        if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
                cancel = 1;
        spin_unlock_irq(&mdev->tconn->req_lock);
        if (cancel)
                return 1;

        if (!drbd_get_data_sock(mdev->tconn))
                return 0;
        p->barrier = b->br_number;
        /* inc_ap_pending was done where this was queued.
         * dec_ap_pending will be done in got_BarrierAck
         * or (on connection loss) in w_clear_epoch. */
        ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
                            &p->head, sizeof(*p), 0);
        drbd_put_data_sock(mdev->tconn);

        return ok;
}

int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        if (cancel)
                return 1;
        return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}

int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_request *req = container_of(w, struct drbd_request, w);
        int ok;

        if (unlikely(cancel)) {
                req_mod(req, SEND_CANCELED);
                return 1;
        }

        ok = drbd_send_oos(mdev, req);
        req_mod(req, OOS_HANDED_TO_NETWORK);

        return ok;
}

/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:       DRBD device.
 * @w:          work object.
 * @cancel:     The connection will be closed anyway
 */
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_request *req = container_of(w, struct drbd_request, w);
        int ok;

        if (unlikely(cancel)) {
                req_mod(req, SEND_CANCELED);
                return 1;
        }

        ok = drbd_send_dblock(mdev, req);
        req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

        return ok;
}

/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:       DRBD device.
 * @w:          work object.
 * @cancel:     The connection will be closed anyway
 */
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_request *req = container_of(w, struct drbd_request, w);
        int ok;

        if (unlikely(cancel)) {
                req_mod(req, SEND_CANCELED);
                return 1;
        }

        ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
                                (unsigned long)req);

        if (!ok) {
                /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
                 * so this is probably redundant */
                if (mdev->state.conn >= C_CONNECTED)
                        drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
        }
        req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);

        return ok;
}

int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        struct drbd_request *req = container_of(w, struct drbd_request, w);

        if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
                drbd_al_begin_io(mdev, req->i.sector);
        /* Calling drbd_al_begin_io() out of the worker might deadlock
           in theory.  In practice it cannot deadlock, since this is
           only used when unfreezing IOs.  All the extents of the requests
           that made it into the TL are already active */

        drbd_req_make_private_bio(req, req->master_bio);
        req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
        generic_make_request(req->private_bio);

        return 1;
}

static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
        struct drbd_conf *odev = mdev;

        while (1) {
                if (odev->sync_conf.after == -1)
                        return 1;
                odev = minor_to_mdev(odev->sync_conf.after);
                if (!expect(odev))
                        return 1;
                if ((odev->state.conn >= C_SYNC_SOURCE &&
                     odev->state.conn <= C_PAUSED_SYNC_T) ||
                    odev->state.aftr_isp || odev->state.peer_isp ||
                    odev->state.user_isp)
                        return 0;
        }
}

/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:       DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
        struct drbd_conf *odev;
        int i, rv = 0;

        for (i = 0; i < minor_count; i++) {
                odev = minor_to_mdev(i);
                if (!odev)
                        continue;
                if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
                        continue;
                if (!_drbd_may_sync_now(odev))
                        rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
                               != SS_NOTHING_TO_DO);
        }

        return rv;
}

/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:       DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
        struct drbd_conf *odev;
        int i, rv = 0;

        for (i = 0; i < minor_count; i++) {
                odev = minor_to_mdev(i);
                if (!odev)
                        continue;
                if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
                        continue;
                if (odev->state.aftr_isp) {
                        if (_drbd_may_sync_now(odev))
                                rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
                                                        CS_HARD, NULL)
                                       != SS_NOTHING_TO_DO) ;
                }
        }
        return rv;
}

void resume_next_sg(struct drbd_conf *mdev)
{
        write_lock_irq(&global_state_lock);
        _drbd_resume_next(mdev);
        write_unlock_irq(&global_state_lock);
}

void suspend_other_sg(struct drbd_conf *mdev)
{
        write_lock_irq(&global_state_lock);
        _drbd_pause_after(mdev);
        write_unlock_irq(&global_state_lock);
}

static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
        struct drbd_conf *odev;

        if (o_minor == -1)
                return NO_ERROR;
        if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
                return ERR_SYNC_AFTER;

        /* check for loops */
        odev = minor_to_mdev(o_minor);
        while (1) {
                if (odev == mdev)
                        return ERR_SYNC_AFTER_CYCLE;

                /* dependency chain ends here, no cycles. */
                if (odev->sync_conf.after == -1)
                        return NO_ERROR;

                /* follow the dependency chain */
                odev = minor_to_mdev(odev->sync_conf.after);
        }
}
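
/* Example: if minor 1 is configured to sync after minor 0, and minor 2 after
 * minor 1, then a request to make minor 0 sync after minor 2 walks
 * 2 -> 1 -> 0 == mdev and is rejected with ERR_SYNC_AFTER_CYCLE before the
 * cycle can be installed.
 */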

int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
        int changes;
        int retcode;

        write_lock_irq(&global_state_lock);
        retcode = sync_after_error(mdev, na);
        if (retcode == NO_ERROR) {
                mdev->sync_conf.after = na;
                do {
                        changes  = _drbd_pause_after(mdev);
                        changes |= _drbd_resume_next(mdev);
                } while (changes);
        }
        write_unlock_irq(&global_state_lock);
        return retcode;
}
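
/* Editor's note: the pause/resume pair above is iterated to a fixed point
 * because a single pass is not necessarily stable -- pausing or resuming one
 * device can change whether another device's sync-after dependency is
 * satisfied further down the chain, so the loop repeats until neither call
 * changes any state.
 */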

void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
        atomic_set(&mdev->rs_sect_in, 0);
        atomic_set(&mdev->rs_sect_ev, 0);
        mdev->rs_in_flight = 0;
        mdev->rs_planed = 0;
        spin_lock(&mdev->peer_seq_lock);
        fifo_set(&mdev->rs_plan_s, 0);
        spin_unlock(&mdev->peer_seq_lock);
}

void start_resync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
}

int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
        if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
                dev_warn(DEV, "w_start_resync later...\n");
                mdev->start_resync_timer.expires = jiffies + HZ/10;
                add_timer(&mdev->start_resync_timer);
                return 1;
        }

        drbd_start_resync(mdev, C_SYNC_SOURCE);
        clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
        return 1;
}
b411b363 PR |
1464 | /** |
1465 | * drbd_start_resync() - Start the resync process | |
1466 | * @mdev: DRBD device. | |
1467 | * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET | |
1468 | * | |
1469 | * This function might bring you directly into one of the | |
1470 | * C_PAUSED_SYNC_* states. | |
1471 | */ | |
1472 | void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) | |
1473 | { | |
1474 | union drbd_state ns; | |
1475 | int r; | |
1476 | ||
c4752ef1 | 1477 | if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) { |
b411b363 PR |
1478 | dev_err(DEV, "Resync already running!\n"); |
1479 | return; | |
1480 | } | |
1481 | ||
59817f4f PR |
1482 | if (mdev->state.conn < C_AHEAD) { |
1483 | /* In case a previous resync run was aborted by an IO error/detach on the peer. */ | |
1484 | drbd_rs_cancel_all(mdev); | |
1485 | /* This should really be done when we abort the resync. We definitely | |
1486 | do not want this for connections going back and forth between | |
1487 | Ahead/Behind and SyncSource/SyncTarget. */ | |
1488 | } | |
b411b363 | 1489 | |
e64a3294 PR |
1490 | if (!test_bit(B_RS_H_DONE, &mdev->flags)) { |
1491 | if (side == C_SYNC_TARGET) { | |
1492 | /* Since application IO was locked out during C_WF_BITMAP_T and | |
1493 | C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET | |
1494 | we check whether we are allowed to make the data inconsistent. */ | |
1495 | r = drbd_khelper(mdev, "before-resync-target"); | |
1496 | r = (r >> 8) & 0xff; | |
1497 | if (r > 0) { | |
1498 | dev_info(DEV, "before-resync-target handler returned %d, " | |
09b9e797 PR |
1499 | "dropping connection.\n", r); |
1500 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
1501 | return; | |
1502 | } | |
e64a3294 PR |
1503 | } else /* C_SYNC_SOURCE */ { |
1504 | r = drbd_khelper(mdev, "before-resync-source"); | |
1505 | r = (r >> 8) & 0xff; | |
1506 | if (r > 0) { | |
1507 | if (r == 3) { | |
1508 | dev_info(DEV, "before-resync-source handler returned %d, " | |
1509 | "ignoring. Old userland tools?", r); | |
1510 | } else { | |
1511 | dev_info(DEV, "before-resync-source handler returned %d, " | |
1512 | "dropping connection.\n", r); | |
1513 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | |
1514 | return; | |
1515 | } | |
1516 | } | |
09b9e797 | 1517 | } |
b411b363 PR |
1518 | } |
1519 | ||
e64a3294 PR |
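| /* drbd_state_lock() sleeps until it can grab the CLUSTER_ST_CHANGE | |
| * bit. The worker must not sleep that long, so it only "trylocks" | |
| * by setting the bit itself; on contention the whole start is | |
| * retried via the timer, with B_RS_H_DONE remembering that the | |
| * handlers already ran. */ | |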
1520 | if (current == mdev->tconn->worker.task) { |
1521 | /* The worker should not sleep waiting for drbd_state_lock(); | |
1522 | that can take a long time. */ | |
1523 | if (test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { | |
1524 | set_bit(B_RS_H_DONE, &mdev->flags); | |
1525 | mdev->start_resync_timer.expires = jiffies + HZ/5; | |
1526 | add_timer(&mdev->start_resync_timer); | |
1527 | return; | |
1528 | } | |
1529 | } else { | |
1530 | drbd_state_lock(mdev); | |
1531 | } | |
1532 | clear_bit(B_RS_H_DONE, &mdev->flags); | |
b411b363 PR |
1533 | |
1534 | if (!get_ldev_if_state(mdev, D_NEGOTIATING)) { | |
1535 | drbd_state_unlock(mdev); | |
1536 | return; | |
1537 | } | |
1538 | ||
b411b363 PR |
1539 | write_lock_irq(&global_state_lock); |
1540 | ns = mdev->state; | |
1541 | ||
1542 | ns.aftr_isp = !_drbd_may_sync_now(mdev); | |
1543 | ||
1544 | ns.conn = side; | |
1545 | ||
1546 | if (side == C_SYNC_TARGET) | |
1547 | ns.disk = D_INCONSISTENT; | |
1548 | else /* side == C_SYNC_SOURCE */ | |
1549 | ns.pdsk = D_INCONSISTENT; | |
1550 | ||
1551 | r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL); | |
1552 | ns = mdev->state; | |
1553 | ||
1554 | if (ns.conn < C_CONNECTED) | |
1555 | r = SS_UNKNOWN_ERROR; | |
1556 | ||
1557 | if (r == SS_SUCCESS) { | |
1d7734a0 LE |
1558 | unsigned long tw = drbd_bm_total_weight(mdev); |
1559 | unsigned long now = jiffies; | |
1560 | int i; | |
1561 | ||
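| /* Fresh bookkeeping for this run; the rs_mark_left/rs_mark_time | |
| * snapshots track progress over time for the sync-speed estimates | |
| * (e.g. in /proc/drbd). */ | |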
b411b363 PR |
1562 | mdev->rs_failed = 0; |
1563 | mdev->rs_paused = 0; | |
b411b363 | 1564 | mdev->rs_same_csum = 0; |
0f0601f4 LE |
1565 | mdev->rs_last_events = 0; |
1566 | mdev->rs_last_sect_ev = 0; | |
1d7734a0 LE |
1567 | mdev->rs_total = tw; |
1568 | mdev->rs_start = now; | |
1569 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { | |
1570 | mdev->rs_mark_left[i] = tw; | |
1571 | mdev->rs_mark_time[i] = now; | |
1572 | } | |
b411b363 PR |
1573 | _drbd_pause_after(mdev); |
1574 | } | |
1575 | write_unlock_irq(&global_state_lock); | |
5a22db89 | 1576 | |
b411b363 PR |
1577 | if (r == SS_SUCCESS) { |
1578 | dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n", | |
1579 | drbd_conn_str(ns.conn), | |
1580 | (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), | |
1581 | (unsigned long) mdev->rs_total); | |
6c922ed5 LE |
1582 | if (side == C_SYNC_TARGET) |
1583 | mdev->bm_resync_fo = 0; | |
1584 | ||
1585 | /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid | |
1586 | * with w_send_oos, or the sync target will get confused as to | |
1587 | * how many bits to resync. We cannot always do that, because for an | |
1588 | * empty resync and protocol < 95 we need to do it here, as we call | |
1589 | * drbd_resync_finished from here in that case. | |
1590 | * We call drbd_gen_and_send_sync_uuid here for protocol < 96, | |
1591 | * and from after_state_ch otherwise. */ | |
31890f4a | 1592 | if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96) |
6c922ed5 | 1593 | drbd_gen_and_send_sync_uuid(mdev); |
b411b363 | 1594 | |
31890f4a | 1595 | if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) { |
af85e8e8 LE |
1596 | /* This still has a race (about when exactly the peers |
1597 | * detect connection loss) that can lead to a full sync | |
1598 | * on next handshake. In 8.3.9 we fixed this with explicit | |
1599 | * resync-finished notifications, but the fix | |
1600 | * introduces a protocol change. Sleeping for some | |
1601 | * time longer than the ping interval + timeout on the | |
1602 | * SyncSource, to give the SyncTarget the chance to | |
1603 | * detect connection loss, then waiting for a ping | |
1604 | * response (implicit in drbd_resync_finished) reduces | |
1605 | * the race considerably, but does not solve it. */ | |
1606 | if (side == C_SYNC_SOURCE) | |
1607 | schedule_timeout_interruptible( | |
89e58e75 PR |
1608 | mdev->tconn->net_conf->ping_int * HZ + |
1609 | mdev->tconn->net_conf->ping_timeo*HZ/9); | |
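| /* ping_timeo is configured in tenths of a second; dividing by 9 | |
| * instead of 10 errs on the side of sleeping slightly longer | |
| * than one ping timeout. */ | |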
b411b363 | 1610 | drbd_resync_finished(mdev); |
b411b363 PR |
1611 | } |
1612 | ||
9bd28d3c | 1613 | drbd_rs_controller_reset(mdev); |
b411b363 PR |
1614 | /* ns.conn may already be != mdev->state.conn, |
1615 | * we may have been paused in between, or become paused until | |
1616 | * the timer triggers. | |
1617 | * No matter, that is handled in resync_timer_fn() */ | |
1618 | if (ns.conn == C_SYNC_TARGET) | |
1619 | mod_timer(&mdev->resync_timer, jiffies); | |
1620 | ||
1621 | drbd_md_sync(mdev); | |
1622 | } | |
5a22db89 | 1623 | put_ldev(mdev); |
d0c3f60f | 1624 | drbd_state_unlock(mdev); |
b411b363 PR |
1625 | } |
1626 | ||
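| /* Main loop of the per-device worker thread: sleep on the work | |
| * semaphore (uncorking the data socket while asleep, so queued TCP | |
| * data gets sent), pop one work item, and run its callback, with | |
| * "cancel" set once we are no longer connected. A failed callback | |
| * on a live connection escalates to C_NETWORK_FAILURE; on exit the | |
| * remaining queue is drained with cancel=1. */ | |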
1627 | int drbd_worker(struct drbd_thread *thi) | |
1628 | { | |
1629 | struct drbd_conf *mdev = thi->mdev; | |
1630 | struct drbd_work *w = NULL; | |
1631 | LIST_HEAD(work_list); | |
1632 | int intr = 0, i; | |
1633 | ||
1634 | sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev)); | |
1635 | ||
e77a0a5c | 1636 | while (get_t_state(thi) == RUNNING) { |
80822284 | 1637 | drbd_thread_current_set_cpu(thi); |
b411b363 | 1638 | |
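| /* While the semaphore is uncontended we keep the socket corked to | |
| * batch outgoing packets; only when we are about to block do we | |
| * uncork, so pending data goes out while we sleep. */ | |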
e42325a5 PR |
1639 | if (down_trylock(&mdev->tconn->data.work.s)) { |
1640 | mutex_lock(&mdev->tconn->data.mutex); | |
1641 | if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork) | |
1642 | drbd_tcp_uncork(mdev->tconn->data.socket); | |
1643 | mutex_unlock(&mdev->tconn->data.mutex); | |
b411b363 | 1644 | |
e42325a5 | 1645 | intr = down_interruptible(&mdev->tconn->data.work.s); |
b411b363 | 1646 | |
e42325a5 PR |
1647 | mutex_lock(&mdev->tconn->data.mutex); |
1648 | if (mdev->tconn->data.socket && !mdev->tconn->net_conf->no_cork) | |
1649 | drbd_tcp_cork(mdev->tconn->data.socket); | |
1650 | mutex_unlock(&mdev->tconn->data.mutex); | |
b411b363 PR |
1651 | } |
1652 | ||
1653 | if (intr) { | |
1654 | D_ASSERT(intr == -EINTR); | |
1655 | flush_signals(current); | |
841ce241 | 1656 | if (!expect(get_t_state(thi) != RUNNING)) |
b411b363 PR |
1657 | continue; |
1658 | break; | |
1659 | } | |
1660 | ||
e77a0a5c | 1661 | if (get_t_state(thi) != RUNNING) |
b411b363 PR |
1662 | break; |
1663 | /* With this break, we have done a down() but not consumed | |
1664 | the entry from the list. The cleanup code takes care of | |
1665 | this... */ | |
1666 | ||
1667 | w = NULL; | |
e42325a5 PR |
1668 | spin_lock_irq(&mdev->tconn->data.work.q_lock); |
1669 | if (!expect(!list_empty(&mdev->tconn->data.work.q))) { | |
b411b363 PR |
1670 | /* something terribly wrong in our logic. |
1671 | * we were able to down() the semaphore, | |
1672 | * but the list is empty... doh. | |
1673 | * | |
1674 | * what is the best thing to do now? | |
1675 | * try again from scratch, restarting the receiver, | |
1676 | * asender, whatnot? That could break even more badly, | |
1677 | * e.g. when we are primary, but have no good local data. | |
1678 | * | |
1679 | * I'll try to get away just starting over this loop. | |
1680 | */ | |
e42325a5 | 1681 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
b411b363 PR |
1682 | continue; |
1683 | } | |
e42325a5 | 1684 | w = list_entry(mdev->tconn->data.work.q.next, struct drbd_work, list); |
b411b363 | 1685 | list_del_init(&w->list); |
e42325a5 | 1686 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
b411b363 PR |
1687 | |
1688 | if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) { | |
1689 | /* dev_warn(DEV, "worker: a callback failed! \n"); */ | |
1690 | if (mdev->state.conn >= C_CONNECTED) | |
1691 | drbd_force_state(mdev, | |
1692 | NS(conn, C_NETWORK_FAILURE)); | |
1693 | } | |
1694 | } | |
1695 | D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags)); | |
1696 | D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags)); | |
1697 | ||
e42325a5 | 1698 | spin_lock_irq(&mdev->tconn->data.work.q_lock); |
b411b363 | 1699 | i = 0; |
e42325a5 PR |
1700 | while (!list_empty(&mdev->tconn->data.work.q)) { |
1701 | list_splice_init(&mdev->tconn->data.work.q, &work_list); | |
1702 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); | |
b411b363 PR |
1703 | |
1704 | while (!list_empty(&work_list)) { | |
1705 | w = list_entry(work_list.next, struct drbd_work, list); | |
1706 | list_del_init(&w->list); | |
1707 | w->cb(mdev, w, 1); | |
1708 | i++; /* dead debugging code */ | |
1709 | } | |
1710 | ||
e42325a5 | 1711 | spin_lock_irq(&mdev->tconn->data.work.q_lock); |
b411b363 | 1712 | } |
e42325a5 | 1713 | sema_init(&mdev->tconn->data.work.s, 0); |
b411b363 PR |
1714 | /* DANGEROUS race: if someone queued their work while holding the | |
1715 | * spinlock, but called up() outside of it, we could get an up() on | |
1716 | * the semaphore without a corresponding list entry. | |
1717 | * So don't do that. | |
1718 | */ | |
e42325a5 | 1719 | spin_unlock_irq(&mdev->tconn->data.work.q_lock); |
b411b363 PR |
1720 | |
1721 | D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE); | |
1722 | /* _drbd_set_state only uses stop_nowait. | |
e6b3ea83 PR |
1723 | * wait here for the exiting receiver. */ |
1724 | drbd_thread_stop(&mdev->tconn->receiver); | |
b411b363 PR |
1725 | drbd_mdev_cleanup(mdev); |
1726 | ||
1727 | dev_info(DEV, "worker terminated\n"); | |
1728 | ||
1729 | clear_bit(DEVICE_DYING, &mdev->flags); | |
1730 | clear_bit(CONFIG_PENDING, &mdev->flags); | |
1731 | wake_up(&mdev->state_wait); | |
1732 | ||
1733 | return 0; | |
1734 | } |