drbd: Change how the "handshake" packets are called
drivers/block/drbd/drbd_actlog.c
/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* all fields on disc in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes.  Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 byte used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining byte in the 4k block for
	 * context information.  "Flexible" number of updates per transaction
	 * does not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit.
	 */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows to cover device sizes of up to 2**54 Byte (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */

	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};

struct update_odbm_work {
	struct drbd_work w;
	unsigned int enr;
};

struct update_al_work {
	struct drbd_work w;
	struct completion event;
	int err;
};

struct drbd_atodb_wait {
	atomic_t count;
	struct completion io_done;
	struct drbd_conf *mdev;
	int error;
};


static int w_al_write_transaction(struct drbd_work *, int);

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int err;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
		rw |= REQ_FUA | REQ_FLUSH;
	rw |= REQ_SYNC;

	bio = bio_alloc_drbd(GFP_NOIO);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, page, size, 0) != size)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	if (bio_flagged(bio, BIO_UPTODATE))
		err = md_io.error;

 out:
	bio_put(bio);
	return err;
}

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int err;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));

	BUG_ON(!bdev->md_bdev);

	dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
		current->comm, current->pid, __func__,
		(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
			current->comm, current->pid, __func__,
			(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, MD_BLOCK_SIZE);
	if (err) {
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
			(unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
	}
	return err;
}

static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	struct lc_element *tmp;
	int wake;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
			wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
			spin_unlock_irq(&mdev->al_lock);
			if (wake)
				wake_up(&mdev->al_wait);
			return NULL;
		}
	}
	al_ext = lc_get(mdev->act_log, enr);
	spin_unlock_irq(&mdev->al_lock);
	return al_ext;
}

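/* Note: one AL extent covers 2^AL_EXTENT_SHIFT bytes of application data.
 * Assuming the 4 MiB extents mentioned above (AL_EXTENT_SHIFT == 22), the
 * sector-to-extent mapping in drbd_al_begin_io() below reduces to
 * enr = sector >> 13, i.e. 8192 512-byte sectors per extent. */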
void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *al_ext;
	struct update_al_work al_work;

	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));

	if (al_ext->lc_number != enr) {
		/* drbd_al_write_transaction(mdev,al_ext,enr);
		 * recurses into generic_make_request(), which
		 * disallows recursion, bios being serialized on the
		 * current->bio_tail list now.
		 * we have to delegate updates to the activity log
		 * to the worker thread. */

		/* Serialize multiple transactions.
		 * This uses test_and_set_bit, memory barrier is implicit.
		 * Optimization potential:
		 * first check for transaction number > old transaction number,
		 * so not all waiters have to lock/unlock. */
		wait_event(mdev->al_wait, lc_try_lock_for_transaction(mdev->act_log));

		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (al_ext->lc_number != enr) {
			init_completion(&al_work.event);
			al_work.w.cb = w_al_write_transaction;
			al_work.w.mdev = mdev;
			drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w);
			wait_for_completion(&al_work.event);

			mdev->al_writ_cnt++;

			spin_lock_irq(&mdev->al_lock);
			/* FIXME
			if (al_work.err)
				we need an "lc_cancel" here;
			*/
			lc_committed(mdev->act_log);
			spin_unlock_irq(&mdev->al_lock);
		}
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);
	}
}

void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *extent;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);

	extent = lc_find(mdev->act_log, enr);

	if (!extent) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
		return;
	}

	if (lc_put(mdev->act_log, extent) == 0)
		wake_up(&mdev->al_wait);

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

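/* The two helpers below convert extent numbers to bitmap page numbers by
 * shifting out the difference between "bits per page" and "bitmap bits per
 * extent".  Illustrative numbers, assuming 4 KiB pages (PAGE_SHIFT == 12),
 * 4 KiB bitmap granularity (BM_BLOCK_SHIFT == 12), 4 MiB AL extents
 * (AL_EXTENT_SHIFT == 22) and 16 MiB resync extents (BM_EXT_SHIFT == 24):
 * one page holds 2^(12+3) = 32768 bitmap bits; an AL extent covers
 * 2^(22-12) = 1024 bits (32 AL extents per page), a resync extent
 * 2^(24-12) = 4096 bits (8 resync extents per page). */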
static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}

static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
{
	return rs_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* resync extent number to bit */
		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}

static int
w_al_write_transaction(struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct drbd_conf *mdev = w->mdev;
	struct al_transaction_on_disk *buffer;
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(mdev->state.disk));
		aw->err = -EIO;
		complete(&((struct update_al_work *)w)->event);
		return 0;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (mdev->state.disk < D_INCONSISTENT) {
		dev_err(DEV,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(mdev->state.disk));
		aw->err = -EIO;
		complete(&((struct update_al_work *)w)->event);
		put_ldev(mdev);
		return 0;
	}

	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
	buffer = page_address(mdev->md_io_page);

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	i = 0;

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&mdev->al_lock);
	list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(mdev,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&mdev->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	sector =  mdev->ldev->md.md_offset
		+ mdev->ldev->md.al_offset
		+ mdev->al_tr_pos * (MD_BLOCK_SIZE>>9);

	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);

	if (drbd_bm_write_hinted(mdev))
		aw->err = -EIO;
		/* drbd_chk_io_error done already */
	else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		aw->err = -EIO;
		drbd_chk_io_error(mdev, 1, true);
	} else {
		/* advance ringbuffer position and transaction counter */
		mdev->al_tr_pos = (mdev->al_tr_pos + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);
		mdev->al_tr_number++;
	}

	mutex_unlock(&mdev->md_io_mutex);
	complete(&((struct update_al_work *)w)->event);
	put_ldev(mdev);

	return 0;
}

/* FIXME
 * reading of the activity log,
 * and potentially dirtying of the affected bitmap regions,
 * should be done from userland only.
 * DRBD would simply always attach with an empty activity log,
 * and refuse to attach to something that looks like a crashed primary.
 */

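/* On-disk layout recap for the reader below: each transaction occupies one
 * MD_BLOCK_SIZE (4k) block, i.e. MD_BLOCK_SIZE>>9 sectors, in a ring of
 * MD_AL_SECTORS*512/MD_BLOCK_SIZE slots starting at md_offset + al_offset.
 * Besides its updates, every transaction carries a rotating window of
 * AL_CONTEXT_PER_TRANSACTION context slots (al_tr_cycle above), so a full
 * cycle of recent transactions always contains a complete snapshot of the
 * activity log. */
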
/**
 * drbd_al_read_tr() - Read a single transaction from the on disk activity log
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 * @index:	On disk slot of the transaction to read.
 *
 * Returns -1 on IO error, 0 on checksum error and 1 upon success.
 */
static int drbd_al_read_tr(struct drbd_conf *mdev,
			   struct drbd_backing_dev *bdev,
			   int index)
{
	struct al_transaction_on_disk *b = page_address(mdev->md_io_page);
	sector_t sector;
	u32 crc;

	sector = bdev->md.md_offset
		+ bdev->md.al_offset
		+ index * (MD_BLOCK_SIZE>>9);

	/* Don't process error normally,
	 * as this is done before disk is attached! */
	if (drbd_md_sync_page_io(mdev, bdev, sector, READ))
		return -1;

	if (!expect(b->magic == cpu_to_be32(DRBD_AL_MAGIC)))
		return 0;

	if (!expect(be16_to_cpu(b->n_updates) <= AL_UPDATES_PER_TRANSACTION))
		return 0;

	if (!expect(be16_to_cpu(b->context_size) <= DRBD_AL_EXTENTS_MAX))
		return 0;

	if (!expect(be16_to_cpu(b->context_start_slot_nr) < DRBD_AL_EXTENTS_MAX))
		return 0;

	/* the checksum was computed with the crc32c field zeroed,
	 * so zero it again before verifying */
	crc = be32_to_cpu(b->crc32c);
	b->crc32c = 0;
	if (!expect(crc == crc32c(0, b, 4096)))
		return 0;

	return 1;
}

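/* Recovery order in drbd_al_read_log(): first scan all slots for valid
 * transactions and use the monotonically increasing tr_number to identify
 * the oldest ("from") and the most recent ("to") slot in the ring; then
 * replay them in order, applying each transaction's context slots first
 * and letting its update slots override them, so the newest information
 * wins. */
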
/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct al_transaction_on_disk *b;
	int i;
	int rv;
	int mx;
	int active_extents = 0;
	int transactions = 0;
	int found_valid = 0;
	int from = 0;
	int to = 0;
	u32 from_tnr = 0;
	u32 to_tnr = 0;
	u32 cnr;

	/* Note that this is expected to be called with a newly created,
	 * clean and all unused activity log of the "expected size".
	 */

	/* lock out all other meta data io for now,
	 * and make sure the page is mapped.
	 */
	mutex_lock(&mdev->md_io_mutex);
	b = page_address(mdev->md_io_page);

	/* Always use the full ringbuffer space for now.
	 * possible optimization: read in all of it,
	 * then scan the in-memory pages. */

	mx = (MD_AL_SECTORS*512/MD_BLOCK_SIZE);

	/* Find the valid transactions in the log */
	for (i = 0; i < mx; i++) {
		rv = drbd_al_read_tr(mdev, bdev, i);
		/* invalid data in that block */
		if (rv == 0)
			continue;

		/* IO error */
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		cnr = be32_to_cpu(b->tr_number);
		if (++found_valid == 1) {
			from = i;
			to = i;
			from_tnr = cnr;
			to_tnr = cnr;
			continue;
		}

		D_ASSERT(cnr != to_tnr);
		D_ASSERT(cnr != from_tnr);
		if ((int)cnr - (int)from_tnr < 0) {
			D_ASSERT(from_tnr - cnr + i - from == mx);
			from = i;
			from_tnr = cnr;
		}
		if ((int)cnr - (int)to_tnr > 0) {
			D_ASSERT(cnr - to_tnr == i - to);
			to = i;
			to_tnr = cnr;
		}
	}

	if (!found_valid) {
		dev_warn(DEV, "No usable activity log found.\n");
		mutex_unlock(&mdev->md_io_mutex);
		return 1;
	}

	/* Read the valid transactions.
	 * dev_info(DEV, "Reading from %d to %d.\n", from, to); */
	i = from;
	while (1) {
		struct lc_element *e;
		unsigned j, n, slot, extent_nr;

		rv = drbd_al_read_tr(mdev, bdev, i);
		if (!expect(rv != 0))
			goto cancel;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		/* deal with different transaction types.
		 * not yet implemented */
		if (!expect(b->transaction_type == 0))
			goto cancel;

		/* on the fly re-create/resize activity log?
		 * will be a special transaction type flag. */
		if (!expect(be16_to_cpu(b->context_size) == mdev->act_log->nr_elements))
			goto cancel;
		if (!expect(be16_to_cpu(b->context_start_slot_nr) < mdev->act_log->nr_elements))
			goto cancel;

		/* We are the only user of the activity log right now,
		 * don't actually need to take that lock. */
		spin_lock_irq(&mdev->al_lock);

		/* first, apply the context, ... */
		for (j = 0, slot = be16_to_cpu(b->context_start_slot_nr);
		     j < AL_CONTEXT_PER_TRANSACTION &&
		     slot < mdev->act_log->nr_elements; j++, slot++) {
			extent_nr = be32_to_cpu(b->context[j]);
			e = lc_element_by_index(mdev->act_log, slot);
			if (e->lc_number != extent_nr) {
				if (extent_nr != LC_FREE)
					active_extents++;
				else
					active_extents--;
			}
			lc_set(mdev->act_log, extent_nr, slot);
		}

		/* ... then apply the updates,
		 * which override the context information.
		 * drbd_al_read_tr already did the rangecheck
		 * on n <= AL_UPDATES_PER_TRANSACTION */
		n = be16_to_cpu(b->n_updates);
		for (j = 0; j < n; j++) {
			slot = be16_to_cpu(b->update_slot_nr[j]);
			extent_nr = be32_to_cpu(b->update_extent_nr[j]);
			if (!expect(slot < mdev->act_log->nr_elements))
				break;
			e = lc_element_by_index(mdev->act_log, slot);
			if (e->lc_number != extent_nr) {
				if (extent_nr != LC_FREE)
					active_extents++;
				else
					active_extents--;
			}
			lc_set(mdev->act_log, extent_nr, slot);
		}
		spin_unlock_irq(&mdev->al_lock);

		transactions++;

cancel:
		if (i == to)
			break;
		i++;
		if (i >= mx)
			i = 0;
	}

	mdev->al_tr_number = to_tnr+1;
	mdev->al_tr_pos = (to + 1) % (MD_AL_SECTORS*512/MD_BLOCK_SIZE);

	/* ok, we are done with it */
	mutex_unlock(&mdev->md_io_mutex);

	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
		transactions, active_extents);

	return 1;
}

/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty(1) where covered by active AL extents
 * @mdev:	DRBD device.
 */
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
	unsigned int enr;
	unsigned long add = 0;
	char ppb[10];
	int i, tmp;

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		tmp = drbd_bm_ALe_set_all(mdev, enr);
		dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
		add += tmp;
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
		ppsize(ppb, Bit2KB(add)));
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(mdev->act_log, al_ext);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(mdev->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
	}

	wake_up(&mdev->al_wait);
}

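/* Worker callback: writes out the on-disk bitmap page covering the resync
 * extent udw->enr.  Queued (GFP_ATOMIC) from drbd_try_clear_on_disk_bm()
 * below, once every bit of an extent is either in sync again or accounted
 * as failed; may also conclude the whole resync. */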
static int w_update_odbm(struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
	struct drbd_conf *mdev = w->mdev;
	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 0;
	}

	drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_event(mdev, &sib);

	return 0;
}


/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;

	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
					"rs_failed=%d count=%d\n",
					(unsigned long long)sector,
					ext->lce.lc_number, ext->rs_left,
					ext->rs_failed, count);
				dump_stack();

				lc_put(mdev->resync, &ext->lce);
				conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
				return;
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
					" -> %d[%u;00]\n",
					ext->lce.lc_number, ext->rs_left,
					ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
					"out with rs_failed=%d\n",
					ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(mdev->resync);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				udw->w.mdev = mdev;
				drbd_queue_work_front(&mdev->tconn->data.work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc an udw\n");
			}
		}
	} else {
		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
			mdev->resync_locked,
			mdev->resync->nr_elements,
			mdev->resync->flags);
	}
}

void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
		    mdev->state.conn != C_PAUSED_SYNC_T &&
		    mdev->state.conn != C_PAUSED_SYNC_S) {
			mdev->rs_mark_time[next] = now;
			mdev->rs_mark_left[next] = still_to_go;
			mdev->rs_last_mark = next;
		}
	}
}

/* clear the bit corresponding to the piece of storage in question:
 * size byte of data starting from sector.  Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		       const char *file, const unsigned int line)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;
	int wake_up = 0;
	unsigned long flags;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		return;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we clear it (in sync).
	 * round up start sector, round down end sector.  we make sure we only
	 * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
	if (count && get_ldev(mdev)) {
		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
		spin_lock_irqsave(&mdev->al_lock, flags);
		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
		spin_unlock_irqrestore(&mdev->al_lock, flags);

		/* just wake_up unconditional now, various lc_committed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
		put_ldev(mdev);
	}
	if (wake_up)
		wake_up(&mdev->al_wait);
}

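/* Granularity note for the two functions around here: with 512-byte sectors
 * and 4 KiB bitmap blocks, BM_SECT_PER_BIT == 8.  __drbd_set_in_sync() above
 * only clears bits whose full 4 KiB block is covered (start rounded up, end
 * rounded down), while __drbd_set_out_of_sync() below sets every bit the
 * range merely touches.  Illustrative example, assuming those defaults:
 * a 4 KiB write at sector 12 spans sectors 12..19; out-of-sync sets bits
 * 1 and 2, but in-sync computes sbnr == 2, ebnr == 1 and clears nothing. */
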
/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
			    const char *file, const unsigned int line)
{
	unsigned long sbnr, ebnr, lbnr, flags;
	sector_t esector, nr_sectors;
	unsigned int enr, count = 0;
	struct lc_element *e;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(mdev))
		return 0; /* no disk, no metadata, no bitmap to set bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we set it out of sync,
	 * we do not need to round anything here */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors. */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_set_bits(mdev, sbnr, ebnr);

	enr = BM_SECT_TO_EXT(sector);
	e = lc_find(mdev->resync, enr);
	if (e)
		lc_entry(e, struct bm_extent, lce)->rs_left += count;
	spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
	put_ldev(mdev);

	return count;
}

static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
		spin_unlock_irq(&mdev->al_lock);
		return NULL;
	}
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_committed(mdev->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			mdev->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = mdev->resync->flags;
	spin_unlock_irq(&mdev->al_lock);
	if (wakeup)
		wake_up(&mdev->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
				" (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = lc_is_used(mdev->act_log, enr);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

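/* One resync extent (16 MiB) covers AL_EXT_PER_BM_SECT activity log extents
 * (4, assuming the default 4 MiB AL extents).  Before resync may lock an
 * extent, drbd_rs_begin_io()/drbd_try_rs_begin_io() below therefore also
 * wait until none of those AL extents is in use by application IO. */
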
/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
			 200 times -> 20 seconds. */

retry:
	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
				test_bit(BME_PRIORITY, &bm_ext->flags));

		if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
			spin_lock_irq(&mdev->al_lock);
			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				mdev->resync_locked--;
				wake_up(&mdev->al_wait);
			}
			spin_unlock_irq(&mdev->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			if (sa && --sa == 0)
				dev_warn(DEV, "drbd_rs_begin_io() stepped aside for 20sec. "
					 "Resync stalled?\n");
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer for an undefined amount of time
		 * if we give up the ref count when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			mdev->resync_wenr = LC_FREE;
			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
				mdev->resync_locked--;
			wake_up(&mdev->al_wait);
		} else {
			dev_alert(DEV, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			mdev->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (mdev->resync_locked > mdev->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(mdev->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = mdev->resync->flags;
			if (rs_flags & LC_STARVING)
				dev_warn(DEV, "Have to wait for element"
					" (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_LOCKED);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_committed(mdev->resync);
			wake_up(&mdev->al_wait);
			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(bm_ext->lce.refcnt == 1);
		mdev->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (lc_is_used(mdev->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	return 0;

try_again:
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
			"but refcnt is 0!?\n",
			(unsigned long long)sector, enr);
		return;
	}

	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		mdev->resync_locked--;
		wake_up(&mdev->al_wait);
	}

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(mdev->resync);
		put_ldev(mdev);
	}
	mdev->resync_locked = 0;
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < mdev->resync->nr_elements; i++) {
			e = lc_element_by_index(mdev->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
					" got 'synced' by application io\n",
					mdev->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_wenr = LC_FREE;
				lc_put(mdev->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
					"refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(mdev);
				spin_unlock_irq(&mdev->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(mdev->resync, &bm_ext->lce);
		}
		D_ASSERT(mdev->resync->used == 0);
		put_ldev(mdev);
	}
	spin_unlock_irq(&mdev->al_lock);

	return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 * @size:	Size of failed IO operation, in bytes.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count;
	sector_t esector, nr_sectors;
	int wake_up = 0;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		return;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/*
	 * round up start sector, round down end sector.  we make sure we only
	 * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irq(&mdev->al_lock);
	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
	if (count) {
		mdev->rs_failed += count;

		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, false);
			put_ldev(mdev);
		}

		/* just wake_up unconditional now, various lc_committed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake_up)
		wake_up(&mdev->al_wait);
}