/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* We maintain a trivial checksum in our on disk activity log.
 * With that we can ensure correct operation even when the storage
 * device might do a partial (last) sector write while losing power.
 */
struct __packed al_transaction {
	u32       magic;
	u32       tr_number;
	struct __packed {
		u32 pos;
		u32 extent; } updates[1 + AL_EXTENTS_PT];
	u32       xor_sum;
};
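
/* Size check (an added note, assuming the drbd_int.h definition
 * AL_EXTENTS_PT == (MD_SECTOR_SIZE-12)/8 - 1 == 61): one transaction is
 * 4 + 4 + 62*8 + 4 = 508 bytes, so it always fits into a single 512-byte
 * on-disk sector, which is what makes the xor_sum check sufficient. */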

struct update_odbm_work {
	struct drbd_work w;
	unsigned int enr;
};

struct update_al_work {
	struct drbd_work w;
	struct lc_element *al_ext;
	struct completion event;
	unsigned int enr;
	/* if old_enr != LC_FREE, write corresponding bitmap sector, too */
	unsigned int old_enr;
};

struct drbd_atodb_wait {
	atomic_t           count;
	struct completion  io_done;
	struct drbd_conf   *mdev;
	int                error;
};


int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
		rw |= REQ_FUA;
	rw |= REQ_SYNC;

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

 out:
	bio_put(bio);
	return ok;
}
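
/* Illustration for the remapping below (an added example; the constants
 * follow from the D_ASSERTs): if the meta-data device reports a 4096-byte
 * logical block size, mask == 7, so a 512-byte access to sector 13 becomes
 * an aligned access to sectors 8..15 via md_io_tmpp, with the interesting
 * 512 bytes at offset == 5.  Writes read-modify-write the full block. */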

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int logical_block_size, mask, ok;
	int offset = 0;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));

	BUG_ON(!bdev->md_bdev);

	logical_block_size = bdev_logical_block_size(bdev->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	/* in case logical_block_size != 512 [ s390 only? ] */
	if (logical_block_size != MD_SECTOR_SIZE) {
		mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
		D_ASSERT(mask == 1 || mask == 3 || mask == 7);
		D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
		offset = sector & mask;
		sector = sector & ~mask;
		iop = mdev->md_io_tmpp;

		if (rw & WRITE) {
			/* these are GFP_KERNEL pages, pre-allocated
			 * on device initialization */
			void *p = page_address(mdev->md_io_page);
			void *hp = page_address(mdev->md_io_tmpp);

			ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
					READ, logical_block_size);

			if (unlikely(!ok)) {
				dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
				    "READ [logical_block_size!=512]) failed!\n",
				    (unsigned long long)sector);
				return 0;
			}

			memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
		}
	}

	if (sector < drbd_md_first_sector(bdev) ||
	    sector > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
	if (unlikely(!ok)) {
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
		return 0;
	}

	if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
		void *p = page_address(mdev->md_io_page);
		void *hp = page_address(mdev->md_io_tmpp);

		memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
	}

	return ok;
}

static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	struct lc_element *tmp;
	unsigned long     al_flags = 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
			set_bit(BME_PRIORITY, &bm_ext->flags);
			spin_unlock_irq(&mdev->al_lock);
			return NULL;
		}
	}
	al_ext   = lc_get(mdev->act_log, enr);
	al_flags = mdev->act_log->flags;
	spin_unlock_irq(&mdev->al_lock);

	/*
	if (!al_ext) {
		if (al_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
		if (al_flags & LC_DIRTY)
			dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
	}
	*/

	return al_ext;
}
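
/* Added worked example, assuming AL_EXTENT_SHIFT == 22 (the 4MB AL extents
 * mentioned further below): enr = sector >> (22-9) == sector >> 13, so
 * 512-byte sector 100000 belongs to activity log extent number 12. */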

void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *al_ext;
	struct update_al_work al_work;

	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));

	if (al_ext->lc_number != enr) {
		/* drbd_al_write_transaction(mdev,al_ext,enr);
		 * recurses into generic_make_request(), which
		 * disallows recursion, bios being serialized on the
		 * current->bio_tail list now.
		 * we have to delegate updates to the activity log
		 * to the worker thread. */
		init_completion(&al_work.event);
		al_work.al_ext = al_ext;
		al_work.enr = enr;
		al_work.old_enr = al_ext->lc_number;
		al_work.w.cb = w_al_write_transaction;
		drbd_queue_work_front(&mdev->data.work, &al_work.w);
		wait_for_completion(&al_work.event);

		mdev->al_writ_cnt++;

		spin_lock_irq(&mdev->al_lock);
		lc_changed(mdev->act_log, al_ext);
		spin_unlock_irq(&mdev->al_lock);
		wake_up(&mdev->al_wait);
	}
}

void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *extent;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);

	extent = lc_find(mdev->act_log, enr);

	if (!extent) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
		return;
	}

	if (lc_put(mdev->act_log, extent) == 0)
		wake_up(&mdev->al_wait);

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}
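
/* Note on the transaction layout written below (inferred from the code):
 * updates[0] carries the newly mapped slot, while updates[1..AL_EXTENTS_PT]
 * cyclically re-log existing slots starting at al_tr_cycle.  After
 * div_ceil(nr_elements, AL_EXTENTS_PT) transactions every slot has been
 * re-logged at least once, which is what allows drbd_al_read_log() to
 * rebuild the complete in-core activity log from the most recent window
 * of transactions. */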

int
w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct lc_element *updated = aw->al_ext;
	const unsigned int new_enr = aw->enr;
	const unsigned int evicted = aw->old_enr;
	struct al_transaction *buffer;
	sector_t sector;
	int i, n, mx;
	unsigned int extent_nr;
	u32 xor_sum = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV,
			"disk is %s, cannot start al transaction (-%d +%d)\n",
			drbd_disk_str(mdev->state.disk), evicted, new_enr);
		complete(&((struct update_al_work *)w)->event);
		return 1;
	}
	/* do we have to do a bitmap write, first?
	 * TODO reduce maximum latency:
	 * submit both bios, then wait for both,
	 * instead of doing two synchronous sector writes.
	 * For now, we must not write the transaction,
	 * if we cannot write out the bitmap of the evicted extent. */
	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);

	/* The bitmap write may have failed, causing a state change. */
	if (mdev->state.disk < D_INCONSISTENT) {
		dev_err(DEV,
			"disk is %s, cannot write al transaction (-%d +%d)\n",
			drbd_disk_str(mdev->state.disk), evicted, new_enr);
		complete(&((struct update_al_work *)w)->event);
		put_ldev(mdev);
		return 1;
	}

	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
	buffer = (struct al_transaction *)page_address(mdev->md_io_page);

	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	n = lc_index_of(mdev->act_log, updated);

	buffer->updates[0].pos = cpu_to_be32(n);
	buffer->updates[0].extent = cpu_to_be32(new_enr);

	xor_sum ^= new_enr;

	mx = min_t(int, AL_EXTENTS_PT,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->updates[i+1].pos = cpu_to_be32(idx);
		buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
		xor_sum ^= extent_nr;
	}
	for (; i < AL_EXTENTS_PT; i++) {
		buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
		buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
		xor_sum ^= LC_FREE;
	}
	mdev->al_tr_cycle += AL_EXTENTS_PT;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	buffer->xor_sum = cpu_to_be32(xor_sum);

	sector =  mdev->ldev->md.md_offset
		+ mdev->ldev->md.al_offset + mdev->al_tr_pos;

	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
		drbd_chk_io_error(mdev, 1, TRUE);

	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
	mdev->al_tr_number++;

	mutex_unlock(&mdev->md_io_mutex);

	complete(&((struct update_al_work *)w)->event);
	put_ldev(mdev);

	return 1;
}

/**
 * drbd_al_read_tr() - Read a single transaction from the on disk activity log
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 * @b:		pointer to an al_transaction.
 * @index:	On disk slot of the transaction to read.
 *
 * Returns -1 on IO error, 0 on checksum error and 1 upon success.
 */
static int drbd_al_read_tr(struct drbd_conf *mdev,
			   struct drbd_backing_dev *bdev,
			   struct al_transaction *b,
			   int index)
{
	sector_t sector;
	int rv, i;
	u32 xor_sum = 0;

	sector = bdev->md.md_offset + bdev->md.al_offset + index;

	/* Don't process errors the usual way,
	 * as this is done before the disk is attached! */
	if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
		return -1;

	rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);

	for (i = 0; i < AL_EXTENTS_PT + 1; i++)
		xor_sum ^= be32_to_cpu(b->updates[i].extent);
	rv &= (xor_sum == be32_to_cpu(b->xor_sum));

	return rv;
}
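
/* Sketch of the recovery scan below (an added illustration with four
 * on-disk slots, i.e. mx == 3): given tr_numbers [7 8 5 6] in slots 0..3,
 * the first pass picks from = slot 2 (tnr 5) and to = slot 1 (tnr 8);
 * the second pass then replays slots 2, 3, 0, 1 in that order, so newer
 * transactions overwrite whatever older ones had set. */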

/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct al_transaction *buffer;
	int i;
	int rv;
	int mx;
	int active_extents = 0;
	int transactions = 0;
	int found_valid = 0;
	int from = 0;
	int to = 0;
	u32 from_tnr = 0;
	u32 to_tnr = 0;
	u32 cnr;

	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);

	/* lock out all other meta data io for now,
	 * and make sure the page is mapped.
	 */
	mutex_lock(&mdev->md_io_mutex);
	buffer = page_address(mdev->md_io_page);

	/* Find the valid transaction in the log */
	for (i = 0; i <= mx; i++) {
		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		if (rv == 0)
			continue;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}
		cnr = be32_to_cpu(buffer->tr_number);

		if (++found_valid == 1) {
			from = i;
			to = i;
			from_tnr = cnr;
			to_tnr = cnr;
			continue;
		}
		if ((int)cnr - (int)from_tnr < 0) {
			D_ASSERT(from_tnr - cnr + i - from == mx+1);
			from = i;
			from_tnr = cnr;
		}
		if ((int)cnr - (int)to_tnr > 0) {
			D_ASSERT(cnr - to_tnr == i - to);
			to = i;
			to_tnr = cnr;
		}
	}

	if (!found_valid) {
		dev_warn(DEV, "No usable activity log found.\n");
		mutex_unlock(&mdev->md_io_mutex);
		return 1;
	}

	/* Read the valid transactions.
	 * dev_info(DEV, "Reading from %d to %d.\n", from, to); */
	i = from;
	while (1) {
		int j, pos;
		unsigned int extent_nr;
		unsigned int trn;

		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		ERR_IF(rv == 0) goto cancel;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		trn = be32_to_cpu(buffer->tr_number);

		spin_lock_irq(&mdev->al_lock);

		/* This loop runs backwards because in the cyclic
		   elements there might be an old version of the
		   updated element (in slot 0). So the element in slot 0
		   can overwrite old versions. */
		for (j = AL_EXTENTS_PT; j >= 0; j--) {
			pos = be32_to_cpu(buffer->updates[j].pos);
			extent_nr = be32_to_cpu(buffer->updates[j].extent);

			if (extent_nr == LC_FREE)
				continue;

			lc_set(mdev->act_log, extent_nr, pos);
			active_extents++;
		}
		spin_unlock_irq(&mdev->al_lock);

		transactions++;

	cancel:
		if (i == to)
			break;
		i++;
		if (i > mx)
			i = 0;
	}

	mdev->al_tr_number = to_tnr+1;
	mdev->al_tr_pos = to;
	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	/* ok, we are done with it */
	mutex_unlock(&mdev->md_io_mutex);

	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
	     transactions, active_extents);

	return 1;
}

static void atodb_endio(struct bio *bio, int error)
{
	struct drbd_atodb_wait *wc = bio->bi_private;
	struct drbd_conf *mdev = wc->mdev;
	struct page *page;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?! */
	if (!error && !uptodate)
		error = -EIO;

	drbd_chk_io_error(mdev, error, TRUE);
	if (error && wc->error == 0)
		wc->error = error;

	if (atomic_dec_and_test(&wc->count))
		complete(&wc->io_done);

	page = bio->bi_io_vec[0].bv_page;
	put_page(page);
	bio_put(bio);
	mdev->bm_writ_cnt++;
	put_ldev(mdev);
}

/* sector to word */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
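
/* Added example, assuming the usual DRBD constants BM_EXT_SHIFT == 24,
 * BM_BLOCK_SHIFT == 12 and LN2_BPL == 6 (64-bit longs): S2W(s) == s << 6,
 * since each 512-byte bitmap sector holds 4096 bits == 64 longs. */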

/* activity log to on disk bitmap -- prepare bio unless that sector
 * is already covered by previously prepared bios */
static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
					struct bio **bios,
					unsigned int enr,
					struct drbd_atodb_wait *wc) __must_hold(local)
{
	struct bio *bio;
	struct page *page;
	sector_t on_disk_sector;
	unsigned int page_offset = PAGE_SIZE;
	int offset;
	int i = 0;
	int err = -ENOMEM;

	/* We always write aligned, full 4k blocks,
	 * so we can ignore the logical_block_size (for now) */
	enr &= ~7U;
	on_disk_sector = enr + mdev->ldev->md.md_offset
			     + mdev->ldev->md.bm_offset;

	D_ASSERT(!(on_disk_sector & 7U));

	/* Check if that enr is already covered by an already created bio.
	 * Caution, bios[] is not NULL terminated,
	 * but only initialized to all NULL.
	 * For completely scattered activity log,
	 * the last invocation iterates over all bios,
	 * and finds the last NULL entry.
	 */
	while ((bio = bios[i])) {
		if (bio->bi_sector == on_disk_sector)
			return 0;
		i++;
	}
	/* bios[i] == NULL, the next not yet used slot */

	/* GFP_KERNEL, we are not in the write-out path */
	bio = bio_alloc(GFP_KERNEL, 1);
	if (bio == NULL)
		return -ENOMEM;

	if (i > 0) {
		const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
		page_offset = prev_bv->bv_offset + prev_bv->bv_len;
		page = prev_bv->bv_page;
	}
	if (page_offset == PAGE_SIZE) {
		page = alloc_page(__GFP_HIGHMEM);
		if (page == NULL)
			goto out_bio_put;
		page_offset = 0;
	} else {
		get_page(page);
	}

	offset = S2W(enr);
	drbd_bm_get_lel(mdev, offset,
			min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
			kmap(page) + page_offset);
	kunmap(page);

	bio->bi_private = wc;
	bio->bi_end_io = atodb_endio;
	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;

	if (bio_add_page(bio, page, 4096, page_offset) != 4096)
		goto out_put_page;

	atomic_inc(&wc->count);
	/* we already know that we may do this...
	 * get_ldev_if_state(mdev,D_ATTACHING);
	 * just get the extra reference, so that the local_cnt reflects
	 * the number of pending IO requests DRBD has at its backing device.
	 */
	atomic_inc(&mdev->local_cnt);

	bios[i] = bio;

	return 0;

out_put_page:
	err = -EINVAL;
	put_page(page);
out_bio_put:
	bio_put(bio);
	return err;
}
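
/* Note on the page reuse above (inferred from the page_offset logic):
 * each bio carries one aligned 4096-byte chunk of the bitmap, and on
 * architectures with PAGE_SIZE > 4096 consecutive chunks are packed into
 * a shared page (reference counted via get_page()); with 4k pages every
 * bio simply gets a page of its own. */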

/**
 * drbd_al_to_on_disk_bm() - Writes bitmap parts covered by active AL extents
 * @mdev:	DRBD device.
 *
 * Called when we detach (unconfigure) local storage,
 * or when we go from R_PRIMARY to R_SECONDARY role.
 */
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
	int i, nr_elements;
	unsigned int enr;
	struct bio **bios;
	struct drbd_atodb_wait wc;

	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
		return; /* sorry, I don't have any act_log etc... */

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	nr_elements = mdev->act_log->nr_elements;

	/* GFP_KERNEL, we are not in anyone's write-out path */
	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
	if (!bios)
		goto submit_one_by_one;

	atomic_set(&wc.count, 0);
	init_completion(&wc.io_done);
	wc.mdev = mdev;
	wc.error = 0;

	for (i = 0; i < nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* next statement also does atomic_inc wc.count and local_cnt */
		if (atodb_prepare_unless_covered(mdev, bios,
						enr/AL_EXT_PER_BM_SECT,
						&wc))
			goto free_bios_submit_one_by_one;
	}

	/* unnecessary optimization? */
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	/* all prepared, submit them */
	for (i = 0; i < nr_elements; i++) {
		if (bios[i] == NULL)
			break;
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
			bios[i]->bi_rw = WRITE;
			bio_endio(bios[i], -EIO);
		} else {
			submit_bio(WRITE, bios[i]);
		}
	}

	/* always (try to) flush bitmap to stable storage */
	drbd_md_flush(mdev);

	/* In case we did not submit a single IO do not wait for
	 * them to complete. ( Because we would wait forever here. )
	 *
	 * In case we had IOs and they are already complete, there
	 * is no point in waiting anyway.
	 * Therefore this if () ... */
	if (atomic_read(&wc.count))
		wait_for_completion(&wc.io_done);

	put_ldev(mdev);

	kfree(bios);
	return;

free_bios_submit_one_by_one:
	/* free everything by calling the endio callback directly. */
	for (i = 0; i < nr_elements && bios[i]; i++)
		bio_endio(bios[i], 0);

	kfree(bios);

submit_one_by_one:
	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* Really slow: if we have al-extents 16..19 active,
		 * sector 4 will be written four times! Synchronous! */
		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	put_ldev(mdev);
}

/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty (1) where covered by active AL extents
 * @mdev:	DRBD device.
 */
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
	unsigned int enr;
	unsigned long add = 0;
	char ppb[10];
	int i, tmp;

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		tmp = drbd_bm_ALe_set_all(mdev, enr);
		dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
		add += tmp;
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
	     ppsize(ppb, Bit2KB(add)));
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(mdev->act_log, al_ext);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(mdev->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
	}

	wake_up(&mdev->al_wait);
}

static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 1;
	}

	drbd_bm_write_sect(mdev, udw->enr);
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_sync_progress(mdev);

	return 1;
}
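
/* Added example, assuming BM_EXT_SHIFT == 24 (the 16MB resync extents of
 * the comment below): BM_SECT_TO_EXT(sector) == sector >> 15, i.e. one
 * resync extent spans 32768 sectors of 512 bytes. */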

/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;

	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
				    "rs_failed=%d count=%d\n",
				     (unsigned long long)sector,
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count);
				dump_stack();

				lc_put(mdev->resync, &ext->lce);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			lc_changed(mdev->resync, &ext->lce);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				drbd_queue_work_front(&mdev->data.work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc an udw\n");
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
		}
	} else {
		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    mdev->resync_locked,
		    mdev->resync->nr_elements,
		    mdev->resync->flags);
	}
}
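
/* Added note (an assumption based on how these fields are used elsewhere
 * in DRBD): rs_mark_time[]/rs_mark_left[] form a small ring of (jiffies,
 * bits-left) samples, advanced at most every DRBD_SYNC_MARK_STEP, from
 * which the average resync speed shown to the user is computed. */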

void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
		    mdev->state.conn != C_PAUSED_SYNC_T &&
		    mdev->state.conn != C_PAUSED_SYNC_S) {
			mdev->rs_mark_time[next] = now;
			mdev->rs_mark_left[next] = still_to_go;
			mdev->rs_last_mark = next;
		}
	}
}
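
/* Rounding example for the set-in-sync path below (added illustration,
 * with BM_SECT_PER_BIT == 8, i.e. one bitmap bit per 4k block): for
 * sector=3, size=8192 (esector=18), sbnr = BM_SECT_TO_BIT(10) == 1 and
 * ebnr = BM_SECT_TO_BIT(11) == 1, so only the one fully covered 4k block
 * (sectors 8..15) is cleared; the partially covered blocks stay dirty. */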

/* clear the bit corresponding to the piece of storage in question:
 * size bytes of data starting from sector.  Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		       const char *file, const unsigned int line)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;
	int wake_up = 0;
	unsigned long flags;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors) return;
	ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we clear it (in sync).
	 * round up start sector, round down end sector.  we make sure we only
	 * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
	if (count && get_ldev(mdev)) {
		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
		spin_lock_irqsave(&mdev->al_lock, flags);
		drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
		spin_unlock_irqrestore(&mdev->al_lock, flags);

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
		put_ldev(mdev);
	}
	if (wake_up)
		wake_up(&mdev->al_wait);
}

/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
			    const char *file, const unsigned int line)
{
	unsigned long sbnr, ebnr, lbnr, flags;
	sector_t esector, nr_sectors;
	unsigned int enr, count = 0;
	struct lc_element *e;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(mdev))
		return 0; /* no disk, no metadata, no bitmap to set bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors)
		goto out;
	ERR_IF(esector >= nr_sectors)
		esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we set it out of sync,
	 * we do not need to round anything here */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors. */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_set_bits(mdev, sbnr, ebnr);

	enr = BM_SECT_TO_EXT(sector);
	e = lc_find(mdev->resync, enr);
	if (e)
		lc_entry(e, struct bm_extent, lce)->rs_left += count;
	spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
	put_ldev(mdev);

	return count;
}

static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
		spin_unlock_irq(&mdev->al_lock);
		return NULL;
	}
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_changed(mdev->resync, &bm_ext->lce);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			mdev->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = mdev->resync->flags;
	spin_unlock_irq(&mdev->al_lock);
	if (wakeup)
		wake_up(&mdev->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_DIRTY);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	int rv = 0;

	spin_lock_irq(&mdev->al_lock);
	if (unlikely(enr == mdev->act_log->new_number))
		rv = 1;
	else {
		al_ext = lc_find(mdev->act_log, enr);
		if (al_ext) {
			if (al_ext->refcnt)
				rv = 1;
		}
	}
	spin_unlock_irq(&mdev->al_lock);

	/*
	if (unlikely(rv)) {
		dev_info(DEV, "Delaying sync read until app's write is done\n");
	}
	*/
	return rv;
}
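
/* Added note, assuming the 4MB AL / 16MB resync extent sizes stated above:
 * AL_EXT_PER_BM_SECT == 4, so one resync extent covers four AL extents;
 * that is why the loops below probe enr * AL_EXT_PER_BM_SECT + i for
 * i in 0..AL_EXT_PER_BM_SECT-1 before locking a resync extent. */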

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;

	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
		if (sig) {
			spin_lock_irq(&mdev->al_lock);
			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_locked--;
				wake_up(&mdev->al_wait);
			}
			spin_unlock_irq(&mdev->al_lock);
			return -EINTR;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer for an undefined time if we give up
		 * the ref count when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			mdev->resync_wenr = LC_FREE;
			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
				mdev->resync_locked--;
			wake_up(&mdev->al_wait);
		} else {
			dev_alert(DEV, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			mdev->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (mdev->resync_locked > mdev->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(mdev->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = mdev->resync->flags;
			if (rs_flags & LC_STARVING)
				dev_warn(DEV, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_DIRTY);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_changed(mdev->resync, &bm_ext->lce);
			wake_up(&mdev->al_wait);
			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(bm_ext->lce.refcnt == 1);
		mdev->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (unlikely(al_enr+i == mdev->act_log->new_number))
			goto try_again;
		if (lc_is_used(mdev->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	return 0;

try_again:
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		mdev->resync_locked--;
		wake_up(&mdev->al_wait);
	}

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(mdev->resync);
		put_ldev(mdev);
	}
	mdev->resync_locked = 0;
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < mdev->resync->nr_elements; i++) {
			e = lc_element_by_index(mdev->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     mdev->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_wenr = LC_FREE;
				lc_put(mdev->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(mdev);
				spin_unlock_irq(&mdev->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(mdev->resync, &bm_ext->lce);
		}
		D_ASSERT(mdev->resync->used == 0);
		put_ldev(mdev);
	}
	spin_unlock_irq(&mdev->al_lock);

	return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 * @size:	Size of failed IO operation, in bytes.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count;
	sector_t esector, nr_sectors;
	int wake_up = 0;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors) return;
	ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/*
	 * round up start sector, round down end sector.  we make sure we only
	 * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irq(&mdev->al_lock);
	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
	if (count) {
		mdev->rs_failed += count;

		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
			put_ldev(mdev);
		}

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake_up)
		wake_up(&mdev->al_wait);
}