md/raid5: remove redundant bio initialisations.
[deliverable/linux.git] drivers/md/raid5.c
/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 *    miss any bits.
 */
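/* Editor's illustrative timeline (hypothetical numbers, not from the source):
 * suppose seq_write == 4 and seq_flush == 5, so batch 5 is still open.  A
 * stripe that gets a new write records bm_seq = seq_flush+1 = 6 and must
 * wait.  An unplug bumps seq_flush to 6, closing that batch; since
 * seq_flush (6) > seq_write (4), the pending bitmap updates are written out
 * and seq_write advances to 6, after which the stripe may be written.
 */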

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}
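/* Editor's worked example (assumes PAGE_SIZE == 4096, PAGE_SHIFT == 12 and
 * 8-byte pointers, so NR_HASH == 512 and HASH_MASK == 0x1ff): STRIPE_SHIFT
 * is 3, and a stripe at sector 0x12345 hashes to bucket
 * (0x12345 >> 3) & 0x1ff == 0x68.
 */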

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio->bi_size >> 9;
	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

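/* Editor's illustrative sketch (not part of the driver): how the helpers
 * above pack two independent 16-bit counters into the single 32-bit
 * bi_phys_segments field.  The names pack_counts() and demo() are
 * hypothetical, for demonstration only.
 */
#if 0
#include <assert.h>

static unsigned int pack_counts(unsigned short phys, unsigned short hw)
{
	/* low 16 bits: active-stripe count; high 16 bits: processed count */
	return ((unsigned int)hw << 16) | phys;
}

static void demo(void)
{
	unsigned int v = pack_counts(3, 2);

	assert((v & 0xffff) == 3);		/* raid5_bi_phys_segments() */
	assert(((v >> 16) & 0xffff) == 2);	/* raid5_bi_hw_segments() */
	v--;					/* raid5_dec_bi_phys_segments() */
	assert((v & 0xffff) == 2);		/* high half is untouched */
}
#endif
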
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * We need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

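/* Editor's worked example for raid6_idx_to_slot() (assumed geometry, not
 * from the source): with disks = 5, pd_idx = 3, qd_idx = 4 and a non-ddf
 * layout, syndrome_disks is 3 and raid6_d0() returns 0.  Walking the disks
 * from there yields slots 0, 1, 2 for the three data disks, syndrome_disks
 * (3) for P and syndrome_disks+1 (4) for Q.
 */
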
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state))
				list_add_tail(&sh->lru, &conf->delayed_list);
			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				 sh->bm_seq - conf->seq_write > 0)
				list_add_tail(&sh->lru, &conf->bitmap_list);
			else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}

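/* Editor's worked example for has_failed() (hypothetical array states): a
 * 4-disk raid5 has max_degraded == 1, so the array has failed only once a
 * second device is lost; a 6-disk raid6 (max_degraded == 2) survives any
 * two device failures but fails with a third.
 */
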
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    );
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				    && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		struct md_rdev *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw & WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		/* We have already checked bad blocks for reads.  Now
		 * need to check for writes.
		 */
		while ((rw & WRITE) && rdev &&
		       test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					      &first_bad, &bad_sectors);
			if (!bad)
				break;

			if (bad < 0) {
				set_bit(BlockedBadBlocks, &rdev->flags);
				if (!conf->mddev->external &&
				    conf->mddev->flags) {
					/* It is very unlikely, but we might
					 * still need to write out the
					 * bad block log - better give it
					 * a chance*/
					md_check_recovery(conf->mddev);
				}
				md_wait_for_blocked_rdev(rdev, conf->mddev);
			} else {
				/* Acknowledged bad block - skip the write */
				rdev_dec_pending(rdev, conf->mddev);
				rdev = NULL;
			}
		}

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_idx = 0;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			generic_make_request(bi);
		} else {
			if (rw & WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bvl->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bvl->bv_offset;
			bio_page = bvl->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}

650
651static void ops_complete_biofill(void *stripe_head_ref)
652{
653 struct stripe_head *sh = stripe_head_ref;
654 struct bio *return_bi = NULL;
d1688a6d 655 struct r5conf *conf = sh->raid_conf;
e4d84909 656 int i;
91c00924 657
e46b272b 658 pr_debug("%s: stripe %llu\n", __func__,
91c00924
DW
659 (unsigned long long)sh->sector);
660
661 /* clear completed biofills */
83de75cc 662 spin_lock_irq(&conf->device_lock);
91c00924
DW
663 for (i = sh->disks; i--; ) {
664 struct r5dev *dev = &sh->dev[i];
91c00924
DW
665
666 /* acknowledge completion of a biofill operation */
e4d84909
DW
667 /* and check if we need to reply to a read request,
668 * new R5_Wantfill requests are held off until
83de75cc 669 * !STRIPE_BIOFILL_RUN
e4d84909
DW
670 */
671 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
91c00924 672 struct bio *rbi, *rbi2;
91c00924 673
91c00924
DW
674 BUG_ON(!dev->read);
675 rbi = dev->read;
676 dev->read = NULL;
677 while (rbi && rbi->bi_sector <
678 dev->sector + STRIPE_SECTORS) {
679 rbi2 = r5_next_bio(rbi, dev->sector);
960e739d 680 if (!raid5_dec_bi_phys_segments(rbi)) {
91c00924
DW
681 rbi->bi_next = return_bi;
682 return_bi = rbi;
683 }
91c00924
DW
684 rbi = rbi2;
685 }
686 }
687 }
83de75cc
DW
688 spin_unlock_irq(&conf->device_lock);
689 clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
91c00924
DW
690
691 return_io(return_bi);
692
e4d84909 693 set_bit(STRIPE_HANDLE, &sh->state);
91c00924
DW
694 release_stripe(sh);
695}
696
static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
						     dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}

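/* Editor's worked example for set_syndrome_sources() (assumed geometry): for
 * a 5-device md-layout raid6 stripe (disks = 5, so syndrome_disks = 3), the
 * loop above fills srcs[0..2] with the data pages in raid6_d0() order,
 * srcs[3] with the P page and srcs[4] with the Q page, then returns 3 - the
 * 'count' that async_gen_syndrome() callers below extend by 2 to cover P
 * and Q.
 */
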
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = NULL;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	} else {
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		if (failb == syndrome_disks) {
			/* We're missing D+P. */
			return async_raid6_datap_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila,
						       blocks, &submit);
		} else {
			/* We're missing D+D. */
			return async_raid6_2data_recov(syndrome_disks+2,
						       STRIPE_SIZE, faila, failb,
						       blocks, &submit);
		}
	}
}


static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock_irq(&sh->raid_conf->device_lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock_irq(&sh->raid_conf->device_lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				if (wbi->bi_rw & REQ_FUA)
					set_bit(R5_WantFUA, &dev->flags);
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;
	bool fua = false;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; )
		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx) {
			set_bit(R5_UPTODATE, &dev->flags);
			if (fua)
				set_bit(R5_WantFUA, &dev->flags);
		}
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

1192
ac6b53b6 1193static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
91c00924 1194{
91c00924 1195 int disks = sh->disks;
ac6b53b6
DW
1196 int pd_idx = sh->pd_idx;
1197 int qd_idx = sh->qd_idx;
1198 struct page *xor_dest;
d6f38f31 1199 struct page **xor_srcs = percpu->scribble;
91c00924 1200 struct dma_async_tx_descriptor *tx;
a08abd8c 1201 struct async_submit_ctl submit;
ac6b53b6
DW
1202 int count;
1203 int i;
91c00924 1204
e46b272b 1205 pr_debug("%s: stripe %llu\n", __func__,
91c00924
DW
1206 (unsigned long long)sh->sector);
1207
ac6b53b6
DW
1208 count = 0;
1209 xor_dest = sh->dev[pd_idx].page;
1210 xor_srcs[count++] = xor_dest;
91c00924 1211 for (i = disks; i--; ) {
ac6b53b6
DW
1212 if (i == pd_idx || i == qd_idx)
1213 continue;
1214 xor_srcs[count++] = sh->dev[i].page;
91c00924
DW
1215 }
1216
d6f38f31
DW
1217 init_async_submit(&submit, 0, NULL, NULL, NULL,
1218 to_addr_conv(sh, percpu));
099f53cb 1219 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
a08abd8c 1220 &sh->ops.zero_sum_result, &submit);
91c00924 1221
91c00924 1222 atomic_inc(&sh->count);
a08abd8c
DW
1223 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1224 tx = async_trigger_callback(&submit);
91c00924
DW
1225}
1226
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}

#ifdef CONFIG_MULTICORE_RAID456
static void async_run_ops(void *param, async_cookie_t cookie)
{
	struct stripe_head *sh = param;
	unsigned long ops_request = sh->ops.request;

	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
	wake_up(&sh->ops.wait_for_ops);

	__raid_run_ops(sh, ops_request);
	release_stripe(sh);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	/* since handle_stripe can be called outside of raid5d context
	 * we need to ensure sh->ops.request is de-staged before another
	 * request arrives
	 */
	wait_event(sh->ops.wait_for_ops,
		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
	sh->ops.request = ops_request;

	atomic_inc(&sh->count);
	async_schedule(async_run_ops, sh);
}
#else
#define raid_run_ops __raid_run_ops
#endif

static int grow_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;
	#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
	#endif

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

1366
d1688a6d 1367static int grow_stripes(struct r5conf *conf, int num)
3f294f4f 1368{
e18b890b 1369 struct kmem_cache *sc;
5e5e3e78 1370 int devs = max(conf->raid_disks, conf->previous_raid_disks);
1da177e4 1371
f4be6b43
N
1372 if (conf->mddev->gendisk)
1373 sprintf(conf->cache_name[0],
1374 "raid%d-%s", conf->level, mdname(conf->mddev));
1375 else
1376 sprintf(conf->cache_name[0],
1377 "raid%d-%p", conf->level, conf->mddev);
1378 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1379
ad01c9e3
N
1380 conf->active_name = 0;
1381 sc = kmem_cache_create(conf->cache_name[conf->active_name],
1da177e4 1382 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
20c2df83 1383 0, 0, NULL);
1da177e4
LT
1384 if (!sc)
1385 return 1;
1386 conf->slab_cache = sc;
ad01c9e3 1387 conf->pool_size = devs;
16a53ecc 1388 while (num--)
3f294f4f 1389 if (!grow_one_stripe(conf))
1da177e4 1390 return 1;
1da177e4
LT
1391 return 0;
1392}
29269553 1393
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}

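/* Editor's worked example (assumes a 64-bit build where both a struct page
 * pointer and an addr_conv_t are 8 bytes; addr_conv_t's size is an
 * assumption here, not taken from this file): for a 4-disk array,
 * scribble_len(4) = 8*(4+2) + 8*(4+2) = 96 bytes - one page pointer and one
 * address-conversion slot for each of the 4 disks plus the 2 destination
 * buffers, matching the layout to_addr_conv() indexes into above.
 */
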
static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		nsh->raid_conf = conf;
		#ifdef CONFIG_MULTICORE_RAID456
		init_waitqueue_head(&nsh->ops.wait_for_ops);
		#endif

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    );
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
	/* critical section passed, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}

static int drop_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;

	spin_lock_irq(&conf->device_lock);
	sh = get_free_stripe(conf);
	spin_unlock_irq(&conf->device_lock);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	shrink_buffers(sh);
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
	while (drop_one_stripe(conf))
		;

	if (conf->slab_cache)
		kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}

static void raid5_end_read_request(struct bio * bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;


	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (uptodate) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			rdev = conf->disks[i].rdev;
			printk_ratelimited(
				KERN_INFO
				"md/raid:%s: read error corrected"
				" (%lu sectors at %llu on %s)\n",
				mdname(conf->mddev), STRIPE_SECTORS,
				(unsigned long long)(sh->sector
						     + rdev->data_offset),
				bdevname(rdev->bdev, b));
			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		}
		if (atomic_read(&conf->disks[i].rdev->read_errors))
			atomic_set(&conf->disks[i].rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
		int retry = 0;
		rdev = conf->disks[i].rdev;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (conf->mddev->degraded >= conf->max_degraded)
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error not correctable "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)(sh->sector
						     + rdev->data_offset),
				bdn);
		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
			/* Oh, no!!! */
			printk_ratelimited(
				KERN_WARNING
				"md/raid:%s: read error NOT corrected!! "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)(sh->sector
						     + rdev->data_offset),
				bdn);
		else if (atomic_read(&rdev->read_errors)
			 > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (retry)
			set_bit(R5_ReadError, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void raid5_end_write_request(struct bio *bi, int error)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	sector_t first_bad;
	int bad_sectors;

	for (i=0 ; i<disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		uptodate);
	if (i == disks) {
		BUG();
		return;
	}

	if (!uptodate) {
		set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
		set_bit(R5_WriteError, &sh->dev[i].flags);
	} else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
			       &first_bad, &bad_sectors))
		set_bit(R5_MadeGood, &sh->dev[i].flags);

	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}


784052ec 1710static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1da177e4 1711
784052ec 1712static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1da177e4
LT
1713{
1714 struct r5dev *dev = &sh->dev[i];
1715
1716 bio_init(&dev->req);
1717 dev->req.bi_io_vec = &dev->vec;
1718 dev->req.bi_vcnt++;
1719 dev->req.bi_max_vecs++;
1da177e4 1720 dev->req.bi_private = sh;
995c4275 1721 dev->vec.bv_page = dev->page;
1da177e4
LT
1722
1723 dev->flags = 0;
784052ec 1724 dev->sector = compute_blocknr(sh, i, previous);
1da177e4
LT
1725}
1726
fd01b88c 1727static void error(struct mddev *mddev, struct md_rdev *rdev)
1da177e4
LT
1728{
1729 char b[BDEVNAME_SIZE];
d1688a6d 1730 struct r5conf *conf = mddev->private;
908f4fbd 1731 unsigned long flags;
0c55e022 1732 pr_debug("raid456: error called\n");
1da177e4 1733
908f4fbd
N
1734 spin_lock_irqsave(&conf->device_lock, flags);
1735 clear_bit(In_sync, &rdev->flags);
1736 mddev->degraded = calc_degraded(conf);
1737 spin_unlock_irqrestore(&conf->device_lock, flags);
1738 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1739
de393cde 1740 set_bit(Blocked, &rdev->flags);
6f8d0c77
N
1741 set_bit(Faulty, &rdev->flags);
1742 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1743 printk(KERN_ALERT
1744 "md/raid:%s: Disk failure on %s, disabling device.\n"
1745 "md/raid:%s: Operation continuing on %d devices.\n",
1746 mdname(mddev),
1747 bdevname(rdev->bdev, b),
1748 mdname(mddev),
1749 conf->raid_disks - mddev->degraded);
16a53ecc 1750}
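
/*
 * Illustrative note on the accounting above (calc_degraded() is defined
 * earlier in this file): on a 6-drive RAID6, where max_degraded == 2, a
 * first disk failure leaves mddev->degraded == 1 with the array still
 * fully operational, a second raises it to 2, and only a third
 * concurrent failure leaves stripes unrecoverable.
 */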

/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user-selected algorithm.
	 */
	pd_idx = qd_idx = -1;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		default:
			BUG();
		}
		break;
	case 6:

		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on the last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
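
/*
 * Worked example for raid5_compute_sector() (illustrative only; assumes a
 * 4-drive RAID5 using ALGORITHM_LEFT_SYMMETRIC with 64KiB chunks, i.e.
 * sectors_per_chunk == 128 and data_disks == 3):
 *
 *	r_sector = 1000
 *	chunk_offset = 1000 % 128 = 104;  chunk_number = 1000 / 128 = 7
 *	stripe = 7 / 3 = 2;  *dd_idx = 7 % 3 = 1
 *	pd_idx = 3 - (2 % 4) = 1
 *	*dd_idx = (1 + 1 + 1) % 4 = 3
 *	new_sector = 2 * 128 + 104 = 360
 *
 * so logical sector 1000 lives at sector 360 of disk 3, and that stripe's
 * parity lives on disk 1.
 */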


static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;


	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 4: break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		default:
			BUG();
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		default:
			BUG();
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
		|| sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
		return 0;
	}
	return r_sector;
}
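
/*
 * Worked example for compute_blocknr() (illustrative only; same
 * assumptions as the raid5_compute_sector() example above): with
 * sh->sector == 360, sh->pd_idx == 1 and i == 3, chunk_offset =
 * 360 % 128 = 104 and stripe = 360 / 128 = 2; LEFT_SYMMETRIC maps disk
 * index 3 back to data index i = 3 - (1 + 1) = 1, so chunk_number =
 * 2 * 3 + 1 = 7 and r_sector = 7 * 128 + 104 = 1000, the inverse of the
 * forward map, which the raid5_compute_sector() self-check above verifies.
 */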


static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
			 int rcw, int expand)
{
	int i, pd_idx = sh->pd_idx, disks = sh->disks;
	struct r5conf *conf = sh->raid_conf;
	int level = conf->level;

	if (rcw) {
		/* if we are not expanding this is a proper write request, and
		 * there will be bios with new data to be drained into the
		 * stripe cache
		 */
		if (!expand) {
			sh->reconstruct_state = reconstruct_state_drain_run;
			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		} else
			sh->reconstruct_state = reconstruct_state_run;

		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];

			if (dev->towrite) {
				set_bit(R5_LOCKED, &dev->flags);
				set_bit(R5_Wantdrain, &dev->flags);
				if (!expand)
					clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
		if (s->locked + conf->max_degraded == disks)
			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
				atomic_inc(&conf->pending_full_writes);
	} else {
		BUG_ON(level == 6);
		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i == pd_idx)
				continue;

			if (dev->towrite &&
			    (test_bit(R5_UPTODATE, &dev->flags) ||
			     test_bit(R5_Wantcompute, &dev->flags))) {
				set_bit(R5_Wantdrain, &dev->flags);
				set_bit(R5_LOCKED, &dev->flags);
				clear_bit(R5_UPTODATE, &dev->flags);
				s->locked++;
			}
		}
	}

	/* keep the parity disk(s) locked while asynchronous operations
	 * are in flight
	 */
	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
	s->locked++;

	if (level == 6) {
		int qd_idx = sh->qd_idx;
		struct r5dev *dev = &sh->dev[qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
		__func__, (unsigned long long)sh->sector,
		s->locked, s->ops_request);
}
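
/*
 * Illustrative example for schedule_reconstruction() (assumes a RAID5
 * array, so max_degraded == 1, and a stripe that starts with nothing
 * locked): for a full-stripe write taken down the rcw path with
 * expand == 0, every data block has ->towrite, so the drain loop locks
 * disks - 1 blocks with R5_Wantdrain; s->locked + max_degraded then
 * equals disks, STRIPE_FULL_WRITE is set, and after the parity block is
 * locked as well s->locked == disks.
 */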

/*
 * Each stripe/dev can have one or more bios attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
	struct bio **bip;
	struct r5conf *conf = sh->raid_conf;
	int firstwrite = 0;

	pr_debug("adding bi b#%llu to stripe s#%llu\n",
		(unsigned long long)bi->bi_sector,
		(unsigned long long)sh->sector);


	spin_lock_irq(&conf->device_lock);
	if (forwrite) {
		bip = &sh->dev[dd_idx].towrite;
		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
			firstwrite = 1;
	} else
		bip = &sh->dev[dd_idx].toread;
	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
			goto overlap;
		bip = &(*bip)->bi_next;
	}
	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
		goto overlap;

	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
	if (*bip)
		bi->bi_next = *bip;
	*bip = bi;
	bi->bi_phys_segments++;

	if (forwrite) {
		/* check if page is covered */
		sector_t sector = sh->dev[dd_idx].sector;
		for (bi = sh->dev[dd_idx].towrite;
		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
			     bi && bi->bi_sector <= sector;
		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
				sector = bi->bi_sector + (bi->bi_size>>9);
		}
		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
	}
	spin_unlock_irq(&conf->device_lock);

	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
		(unsigned long long)(*bip)->bi_sector,
		(unsigned long long)sh->sector, dd_idx);

	if (conf->mddev->bitmap && firstwrite) {
		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
				  STRIPE_SECTORS, 0);
		sh->bm_seq = conf->seq_flush+1;
		set_bit(STRIPE_BIT_DELAY, &sh->state);
	}
	return 1;

 overlap:
	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
	spin_unlock_irq(&conf->device_lock);
	return 0;
}
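
/*
 * Illustrative example of the R5_OVERWRITE check above (assumes 4KiB
 * pages, so STRIPE_SECTORS == 8): two queued write bios covering sectors
 * [s, s+4) and [s+4, s+8) of a device whose dev->sector == s are walked
 * in order; 'sector' advances from s to s+4 to s+8, which reaches
 * dev->sector + STRIPE_SECTORS, so the page counts as fully covered and
 * R5_OVERWRITE is set even though no single bio spans all of it.
 */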

static void end_reshape(struct r5conf *conf);

static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh)
{
	int sectors_per_chunk =
		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
	int dd_idx;
	int chunk_offset = sector_div(stripe, sectors_per_chunk);
	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

	raid5_compute_sector(conf,
			     stripe * (disks - conf->max_degraded)
			     *sectors_per_chunk + chunk_offset,
			     previous,
			     &dd_idx, sh);
}

static void
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks,
				struct bio **return_bi)
{
	int i;
	for (i = disks; i--; ) {
		struct bio *bi;
		int bitmap_end = 0;

		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			struct md_rdev *rdev;
			rcu_read_lock();
			rdev = rcu_dereference(conf->disks[i].rdev);
			if (rdev && test_bit(In_sync, &rdev->flags))
				atomic_inc(&rdev->nr_pending);
			else
				rdev = NULL;
			rcu_read_unlock();
			if (rdev) {
				if (!rdev_set_badblocks(
					    rdev,
					    sh->sector,
					    STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}
		spin_lock_irq(&conf->device_lock);
		/* fail all writes first */
		bi = sh->dev[i].towrite;
		sh->dev[i].towrite = NULL;
		if (bi) {
			s->to_write--;
			bitmap_end = 1;
		}

		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			wake_up(&conf->wait_for_overlap);

		while (bi && bi->bi_sector <
			sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = nextbi;
		}
		/* and fail all 'written' */
		bi = sh->dev[i].written;
		sh->dev[i].written = NULL;
		if (bi) bitmap_end = 1;
		while (bi && bi->bi_sector <
		       sh->dev[i].sector + STRIPE_SECTORS) {
			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			if (!raid5_dec_bi_phys_segments(bi)) {
				md_write_end(conf->mddev);
				bi->bi_next = *return_bi;
				*return_bi = bi;
			}
			bi = bi2;
		}

		/* fail any reads if this device is non-operational and
		 * the data has not reached the cache yet.
		 */
		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
			bi = sh->dev[i].toread;
			sh->dev[i].toread = NULL;
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				wake_up(&conf->wait_for_overlap);
			if (bi) s->to_read--;
			while (bi && bi->bi_sector <
			       sh->dev[i].sector + STRIPE_SECTORS) {
				struct bio *nextbi =
					r5_next_bio(bi, sh->dev[i].sector);
				clear_bit(BIO_UPTODATE, &bi->bi_flags);
				if (!raid5_dec_bi_phys_segments(bi)) {
					bi->bi_next = *return_bi;
					*return_bi = bi;
				}
				bi = nextbi;
			}
		}
		spin_unlock_irq(&conf->device_lock);
		if (bitmap_end)
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS, 0, 0);
		/* If we were in the middle of a write the parity block might
		 * still be locked - so just clear all R5_LOCKED flags
		 */
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

static void
handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
		   struct stripe_head_state *s)
{
	int abort = 0;
	int i;

	md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
	clear_bit(STRIPE_SYNCING, &sh->state);
	s->syncing = 0;
	/* There is nothing more to do for sync/check/repair.
	 * For recovery we need to record a bad block on all
	 * non-sync devices, or abort the recovery
	 */
	if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
		return;
	/* During recovery devices cannot be removed, so locking and
	 * refcounting of rdevs is not needed
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->disks[i].rdev;
		if (!rdev
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(In_sync, &rdev->flags))
			continue;
		if (!rdev_set_badblocks(rdev, sh->sector,
					STRIPE_SECTORS, 0))
			abort = 1;
	}
	if (abort) {
		conf->recovery_disabled = conf->mddev->recovery_disabled;
		set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
	}
}

/* fetch_block - checks the given member device to see if its data needs
 * to be read or computed to satisfy a request.
 *
 * Returns 1 when no more member devices need to be checked, otherwise returns
 * 0 to tell the loop in handle_stripe_fill to continue
 */
static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
		       int disk_idx, int disks)
{
	struct r5dev *dev = &sh->dev[disk_idx];
	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
				  &sh->dev[s->failed_num[1]] };

	/* is the data in this block needed, and can we get it? */
	if (!test_bit(R5_LOCKED, &dev->flags) &&
	    !test_bit(R5_UPTODATE, &dev->flags) &&
	    (dev->toread ||
	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
	     s->syncing || s->expanding ||
	     (s->failed >= 1 && fdev[0]->toread) ||
	     (s->failed >= 2 && fdev[1]->toread) ||
	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
		/* we would like to get this block, possibly by computing it,
		 * otherwise read it if the backing disk is insync
		 */
		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
		BUG_ON(test_bit(R5_Wantread, &dev->flags));
		if ((s->uptodate == disks - 1) &&
		    (s->failed && (disk_idx == s->failed_num[0] ||
				   disk_idx == s->failed_num[1]))) {
			/* the disk has failed and we've been asked to
			 * fetch this block, so compute it instead
			 */
			pr_debug("Computing stripe %llu block %d\n",
				 (unsigned long long)sh->sector, disk_idx);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &dev->flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = -1; /* no 2nd target */
			s->req_compute = 1;
			/* Careful: from this point on 'uptodate' is in the eye
			 * of raid_run_ops which services 'compute' operations
			 * before writes. R5_Wantcompute flags a block that will
			 * be R5_UPTODATE by the time it is needed for a
			 * subsequent operation.
			 */
			s->uptodate++;
			return 1;
		} else if (s->uptodate == disks-2 && s->failed >= 2) {
			/* Computing 2-failure is *very* expensive; only
			 * do it if failed >= 2
			 */
			int other;
			for (other = disks; other--; ) {
				if (other == disk_idx)
					continue;
				if (!test_bit(R5_UPTODATE,
					      &sh->dev[other].flags))
					break;
			}
			BUG_ON(other < 0);
			pr_debug("Computing stripe %llu blocks %d,%d\n",
				 (unsigned long long)sh->sector,
				 disk_idx, other);
			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
			set_bit(R5_Wantcompute, &sh->dev[other].flags);
			sh->ops.target = disk_idx;
			sh->ops.target2 = other;
			s->uptodate += 2;
			s->req_compute = 1;
			return 1;
		} else if (test_bit(R5_Insync, &dev->flags)) {
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantread, &dev->flags);
			s->locked++;
			pr_debug("Reading block %d (sync=%d)\n",
				 disk_idx, s->syncing);
		}
	}

	return 0;
}
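
/*
 * Illustrative example for fetch_block() (assumes a 4-disk RAID5 with one
 * failed member): a request for the failed block cannot be read directly,
 * since a failed device is never R5_Insync, so the surviving blocks are
 * fetched first through the R5_Wantread branch; once s->uptodate equals
 * disks - 1 the missing block is scheduled for reconstruction via
 * R5_Wantcompute.  The two-target compute branch only arises on RAID6
 * with two failures, where recovery is much more expensive.
 */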

/**
 * handle_stripe_fill - read or compute data to satisfy pending requests.
 */
static void handle_stripe_fill(struct stripe_head *sh,
			       struct stripe_head_state *s,
			       int disks)
{
	int i;

	/* look for blocks to read/compute, skip this if a compute
	 * is already in flight, or if the stripe contents are in the
	 * midst of changing due to a write
	 */
	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
	    !sh->reconstruct_state)
		for (i = disks; i--; )
			if (fetch_block(sh, s, i, disks))
				break;
	set_bit(STRIPE_HANDLE, &sh->state);
}


/* handle_stripe_clean_event
 * any written block on an uptodate or failed drive can be returned.
 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
 * never LOCKED, so we don't need to test 'failed' directly.
 */
static void handle_stripe_clean_event(struct r5conf *conf,
	struct stripe_head *sh, int disks, struct bio **return_bi)
{
	int i;
	struct r5dev *dev;

	for (i = disks; i--; )
		if (sh->dev[i].written) {
			dev = &sh->dev[i];
			if (!test_bit(R5_LOCKED, &dev->flags) &&
				test_bit(R5_UPTODATE, &dev->flags)) {
				/* We can return any write requests */
				struct bio *wbi, *wbi2;
				int bitmap_end = 0;
				pr_debug("Return write for disc %d\n", i);
				spin_lock_irq(&conf->device_lock);
				wbi = dev->written;
				dev->written = NULL;
				while (wbi && wbi->bi_sector <
					dev->sector + STRIPE_SECTORS) {
					wbi2 = r5_next_bio(wbi, dev->sector);
					if (!raid5_dec_bi_phys_segments(wbi)) {
						md_write_end(conf->mddev);
						wbi->bi_next = *return_bi;
						*return_bi = wbi;
					}
					wbi = wbi2;
				}
				if (dev->towrite == NULL)
					bitmap_end = 1;
				spin_unlock_irq(&conf->device_lock);
				if (bitmap_end)
					bitmap_endwrite(conf->mddev->bitmap,
							sh->sector,
							STRIPE_SECTORS,
					 !test_bit(STRIPE_DEGRADED, &sh->state),
							0);
			}
		}

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);
}

static void handle_stripe_dirtying(struct r5conf *conf,
				   struct stripe_head *sh,
				   struct stripe_head_state *s,
				   int disks)
{
	int rmw = 0, rcw = 0, i;
	if (conf->max_degraded == 2) {
		/* RAID6 requires 'rcw' in the current implementation.
		 * Calculate the real rcw later - for now make it
		 * look like rcw is cheaper
		 */
		rcw = 1; rmw = 2;
	} else for (i = disks; i--; ) {
		/* would I have to read this buffer for read_modify_write */
		struct r5dev *dev = &sh->dev[i];
		if ((dev->towrite || i == sh->pd_idx) &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags))
				rmw++;
			else
				rmw += 2*disks;  /* cannot read it */
		}
		/* Would I have to read this buffer for reconstruct_write */
		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
		    !test_bit(R5_LOCKED, &dev->flags) &&
		    !(test_bit(R5_UPTODATE, &dev->flags) ||
		      test_bit(R5_Wantcompute, &dev->flags))) {
			if (test_bit(R5_Insync, &dev->flags)) rcw++;
			else
				rcw += 2*disks;
		}
	}
	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
		(unsigned long long)sh->sector, rmw, rcw);
	set_bit(STRIPE_HANDLE, &sh->state);
	if (rmw < rcw && rmw > 0)
		/* prefer read-modify-write, but need to get some data */
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if ((dev->towrite || i == sh->pd_idx) &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags)) &&
			    test_bit(R5_Insync, &dev->flags)) {
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for r-m-w\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	if (rcw <= rmw && rcw > 0) {
		/* want reconstruct write, but need to get some data */
		rcw = 0;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
			    i != sh->pd_idx && i != sh->qd_idx &&
			    !test_bit(R5_LOCKED, &dev->flags) &&
			    !(test_bit(R5_UPTODATE, &dev->flags) ||
			      test_bit(R5_Wantcompute, &dev->flags))) {
				rcw++;
				if (!test_bit(R5_Insync, &dev->flags))
					continue; /* it's a failed drive */
				if (
				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
					pr_debug("Read_old block "
						"%d for Reconstruct\n", i);
					set_bit(R5_LOCKED, &dev->flags);
					set_bit(R5_Wantread, &dev->flags);
					s->locked++;
				} else {
					set_bit(STRIPE_DELAYED, &sh->state);
					set_bit(STRIPE_HANDLE, &sh->state);
				}
			}
		}
	}
	/* now if nothing is locked, and if we have enough data,
	 * we can start a write request
	 */
	/* since handle_stripe can be called at any time we need to handle the
	 * case where a compute block operation has been submitted and then a
	 * subsequent call wants to start a write request.  raid_run_ops only
	 * handles the case where compute block and reconstruct are requested
	 * simultaneously.  If this is not the case then new writes need to be
	 * held off until the compute completes.
	 */
	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
		schedule_reconstruction(sh, s, rcw == 0, 0);
}
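
/*
 * Worked example of the rmw/rcw costing above (illustrative only; assumes
 * a 5-drive RAID5, a chunk-aligned write fully overwriting 1 of the 4
 * data blocks, nothing yet up to date): rmw counts the old copy of the
 * block being written plus the parity block, so rmw == 2; rcw counts the
 * 3 data blocks that are not being overwritten, so rcw == 3.  Since
 * rmw < rcw, the read-modify-write path is chosen and only two old blocks
 * are read before the new parity is computed.
 */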

static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
				struct stripe_head_state *s, int disks)
{
	struct r5dev *dev = NULL;

	set_bit(STRIPE_HANDLE, &sh->state);

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are no failures */
		if (s->failed == 0) {
			BUG_ON(s->uptodate != disks);
			sh->check_state = check_state_run;
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
			s->uptodate--;
			break;
		}
		dev = &sh->dev[s->failed_num[0]];
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;
		if (!dev)
			dev = &sh->dev[sh->pd_idx];

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* either failed parity check, or recovery is happening */
		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
		BUG_ON(s->uptodate != disks);

		set_bit(R5_LOCKED, &dev->flags);
		s->locked++;
		set_bit(R5_Wantwrite, &dev->flags);

		clear_bit(STRIPE_DEGRADED, &sh->state);
		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* if a failure occurred during the check operation, leave
		 * STRIPE_INSYNC not set and let the stripe be handled again
		 */
		if (s->failed)
			break;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
			/* parity is correct (on disc,
			 * not in buffer any more)
			 */
			set_bit(STRIPE_INSYNC, &sh->state);
		else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				set_bit(R5_Wantcompute,
					&sh->dev[sh->pd_idx].flags);
				sh->ops.target = sh->pd_idx;
				sh->ops.target2 = -1;
				s->uptodate++;
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
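
/*
 * Illustrative walk-through of the check state machine above for a
 * 'repair' pass that finds bad parity: check_state_idle starts the xor
 * check (check_state_run); completion reports a non-zero
 * SUM_CHECK_P_RESULT in check_state_check_result, so resync_mismatches
 * is bumped and, because MD_RECOVERY_CHECK is clear, the parity block is
 * recomputed (check_state_compute_run); the recomputed block is then
 * written out via check_state_compute_result, which marks the stripe
 * STRIPE_INSYNC.
 */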


static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
				  struct stripe_head_state *s,
				  int disks)
{
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct r5dev *dev;

	set_bit(STRIPE_HANDLE, &sh->state);

	BUG_ON(s->failed > 2);

	/* Want to check and possibly repair P and Q.
	 * However there could be one 'failed' device, in which
	 * case we can only check one of them, possibly using the
	 * other to generate missing data
	 */

	switch (sh->check_state) {
	case check_state_idle:
		/* start a new check operation if there are < 2 failures */
		if (s->failed == s->q_failed) {
			/* The only possible failed device holds Q, so it
			 * makes sense to check P (If anything else were failed,
			 * we would have used P to recreate it).
			 */
			sh->check_state = check_state_run;
		}
		if (!s->q_failed && s->failed < 2) {
			/* Q is not failed, and we didn't use it to generate
			 * anything, so it makes sense to check it
			 */
			if (sh->check_state == check_state_run)
				sh->check_state = check_state_run_pq;
			else
				sh->check_state = check_state_run_q;
		}

		/* discard potentially stale zero_sum_result */
		sh->ops.zero_sum_result = 0;

		if (sh->check_state == check_state_run) {
			/* async_xor_zero_sum destroys the contents of P */
			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
			s->uptodate--;
		}
		if (sh->check_state >= check_state_run &&
		    sh->check_state <= check_state_run_pq) {
			/* async_syndrome_zero_sum preserves P and Q, so
			 * no need to mark them !uptodate here
			 */
			set_bit(STRIPE_OP_CHECK, &s->ops_request);
			break;
		}

		/* we have 2-disk failure */
		BUG_ON(s->failed != 2);
		/* fall through */
	case check_state_compute_result:
		sh->check_state = check_state_idle;

		/* check that a write has not made the stripe insync */
		if (test_bit(STRIPE_INSYNC, &sh->state))
			break;

		/* now write out any block on a failed drive,
		 * or P or Q if they were recomputed
		 */
		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
		if (s->failed == 2) {
			dev = &sh->dev[s->failed_num[1]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (s->failed >= 1) {
			dev = &sh->dev[s->failed_num[0]];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
			dev = &sh->dev[pd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
			dev = &sh->dev[qd_idx];
			s->locked++;
			set_bit(R5_LOCKED, &dev->flags);
			set_bit(R5_Wantwrite, &dev->flags);
		}
		clear_bit(STRIPE_DEGRADED, &sh->state);

		set_bit(STRIPE_INSYNC, &sh->state);
		break;
	case check_state_run:
	case check_state_run_q:
	case check_state_run_pq:
		break; /* we will be called again upon completion */
	case check_state_check_result:
		sh->check_state = check_state_idle;

		/* handle a successful check operation, if parity is correct
		 * we are done.  Otherwise update the mismatch count and repair
		 * parity if !MD_RECOVERY_CHECK
		 */
		if (sh->ops.zero_sum_result == 0) {
			/* both parities are correct */
			if (!s->failed)
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				/* in contrast to the raid5 case we can validate
				 * parity, but still have a failure to write
				 * back
				 */
				sh->check_state = check_state_compute_result;
				/* Returning at this point means that we may go
				 * off and bring p and/or q uptodate again so
				 * we make sure to check zero_sum_result again
				 * to verify if p or q need writeback
				 */
			}
		} else {
			conf->mddev->resync_mismatches += STRIPE_SECTORS;
			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
				/* don't try to repair!! */
				set_bit(STRIPE_INSYNC, &sh->state);
			else {
				int *target = &sh->ops.target;

				sh->ops.target = -1;
				sh->ops.target2 = -1;
				sh->check_state = check_state_compute_run;
				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[pd_idx].flags);
					*target = pd_idx;
					target = &sh->ops.target2;
					s->uptodate++;
				}
				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
					set_bit(R5_Wantcompute,
						&sh->dev[qd_idx].flags);
					*target = qd_idx;
					s->uptodate++;
				}
			}
		}
		break;
	case check_state_compute_run:
		break;
	default:
		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
		       __func__, sh->check_state,
		       (unsigned long long) sh->sector);
		BUG();
	}
}
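
/*
 * Illustrative cases for the dispatch above (RAID6, so conf->level == 6):
 * with no failed devices both parities are checked (check_state_run_pq);
 * if the single failed device holds Q (s->failed == s->q_failed == 1),
 * only P can be checked (check_state_run); if a data device failed, Q was
 * not used to rebuild anything and is checked alone (check_state_run_q).
 */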

static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
{
	int i;

	/* We have read all the blocks in this stripe and now we need to
	 * copy some of them into a target stripe for expand.
	 */
	struct dma_async_tx_descriptor *tx = NULL;
	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	for (i = 0; i < sh->disks; i++)
		if (i != sh->pd_idx && i != sh->qd_idx) {
			int dd_idx, j;
			struct stripe_head *sh2;
			struct async_submit_ctl submit;

			sector_t bn = compute_blocknr(sh, i, 1);
			sector_t s = raid5_compute_sector(conf, bn, 0,
							  &dd_idx, NULL);
			sh2 = get_active_stripe(conf, s, 0, 1, 1);
			if (sh2 == NULL)
				/* so far only the early blocks of this stripe
				 * have been requested.  When later blocks
				 * get requested, we will try again
				 */
				continue;
			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
			    test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
				/* must have already done this block */
				release_stripe(sh2);
				continue;
			}

			/* place all the copies on one channel */
			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
			tx = async_memcpy(sh2->dev[dd_idx].page,
					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
					  &submit);

			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
			for (j = 0; j < conf->raid_disks; j++)
				if (j != sh2->pd_idx &&
				    j != sh2->qd_idx &&
				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
					break;
			if (j == conf->raid_disks) {
				set_bit(STRIPE_EXPAND_READY, &sh2->state);
				set_bit(STRIPE_HANDLE, &sh2->state);
			}
			release_stripe(sh2);

		}
	/* done submitting copies, wait for them to complete */
	if (tx) {
		async_tx_ack(tx);
		dma_wait_for_async_tx(tx);
	}
}


/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */

static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks;
	struct r5dev *dev;
	int i;

	memset(s, 0, sizeof(*s));

	s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
	s->failed_num[0] = -1;
	s->failed_num[1] = -1;

	/* Now to look around and see what can be done */
	rcu_read_lock();
	spin_lock_irq(&conf->device_lock);
	for (i = disks; i--; ) {
		struct md_rdev *rdev;
		sector_t first_bad;
		int bad_sectors;
		int is_bad = 0;

		dev = &sh->dev[i];

		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
			 i, dev->flags, dev->toread, dev->towrite, dev->written);
		/* maybe we can reply to a read
		 *
		 * new wantfill requests are only permitted while
		 * ops_complete_biofill is guaranteed to be inactive
		 */
		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
			set_bit(R5_Wantfill, &dev->flags);

		/* now count some things */
		if (test_bit(R5_LOCKED, &dev->flags))
			s->locked++;
		if (test_bit(R5_UPTODATE, &dev->flags))
			s->uptodate++;
		if (test_bit(R5_Wantcompute, &dev->flags)) {
			s->compute++;
			BUG_ON(s->compute > 2);
		}

		if (test_bit(R5_Wantfill, &dev->flags))
			s->to_fill++;
		else if (dev->toread)
			s->to_read++;
		if (dev->towrite) {
			s->to_write++;
			if (!test_bit(R5_OVERWRITE, &dev->flags))
				s->non_overwrite++;
		}
		if (dev->written)
			s->written++;
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev) {
			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
					     &first_bad, &bad_sectors);
			if (s->blocked_rdev == NULL
			    && (test_bit(Blocked, &rdev->flags)
				|| is_bad < 0)) {
				if (is_bad < 0)
					set_bit(BlockedBadBlocks,
						&rdev->flags);
				s->blocked_rdev = rdev;
				atomic_inc(&rdev->nr_pending);
			}
		}
		clear_bit(R5_Insync, &dev->flags);
		if (!rdev)
			/* Not in-sync */;
		else if (is_bad) {
			/* also not in-sync */
			if (!test_bit(WriteErrorSeen, &rdev->flags)) {
				/* treat as in-sync, but with a read error
				 * which we can now try to correct
				 */
				set_bit(R5_Insync, &dev->flags);
				set_bit(R5_ReadError, &dev->flags);
			}
		} else if (test_bit(In_sync, &rdev->flags))
			set_bit(R5_Insync, &dev->flags);
		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
			/* in sync if before recovery_offset */
			set_bit(R5_Insync, &dev->flags);
		else if (test_bit(R5_UPTODATE, &dev->flags) &&
			 test_bit(R5_Expanded, &dev->flags))
			/* If we've reshaped into here, we assume it is Insync.
			 * We will shortly update recovery_offset to make
			 * it official.
			 */
			set_bit(R5_Insync, &dev->flags);

		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
			clear_bit(R5_Insync, &dev->flags);
			if (!test_bit(Faulty, &rdev->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev->nr_pending);
			} else
				clear_bit(R5_WriteError, &dev->flags);
		}
		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
			if (!test_bit(Faulty, &rdev->flags)) {
				s->handle_bad_blocks = 1;
				atomic_inc(&rdev->nr_pending);
			} else
				clear_bit(R5_MadeGood, &dev->flags);
		}
		if (!test_bit(R5_Insync, &dev->flags)) {
			/* The ReadError flag will just be confusing now */
			clear_bit(R5_ReadError, &dev->flags);
			clear_bit(R5_ReWrite, &dev->flags);
		}
		if (test_bit(R5_ReadError, &dev->flags))
			clear_bit(R5_Insync, &dev->flags);
		if (!test_bit(R5_Insync, &dev->flags)) {
			if (s->failed < 2)
				s->failed_num[s->failed] = i;
			s->failed++;
		}
	}
	spin_unlock_irq(&conf->device_lock);
	rcu_read_unlock();
}
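
/*
 * Illustrative example of the failure accounting above (assumes a
 * 5-device RAID6 array with members 1 and 3 unusable): the loop scans
 * from the highest index down, so s->failed_num[0] == 3,
 * s->failed_num[1] == 1 and s->failed == 2; handle_stripe() later derives
 * s.p_failed and s.q_failed by comparing these indices with pd_idx and
 * qd_idx.
 */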
3111
3112static void handle_stripe(struct stripe_head *sh)
3113{
3114 struct stripe_head_state s;
d1688a6d 3115 struct r5conf *conf = sh->raid_conf;
3687c061 3116 int i;
84789554
N
3117 int prexor;
3118 int disks = sh->disks;
474af965 3119 struct r5dev *pdev, *qdev;
cc94015a
N
3120
3121 clear_bit(STRIPE_HANDLE, &sh->state);
257a4b42 3122 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
cc94015a
N
3123 /* already being handled, ensure it gets handled
3124 * again when current action finishes */
3125 set_bit(STRIPE_HANDLE, &sh->state);
3126 return;
3127 }
3128
3129 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3130 set_bit(STRIPE_SYNCING, &sh->state);
3131 clear_bit(STRIPE_INSYNC, &sh->state);
3132 }
3133 clear_bit(STRIPE_DELAYED, &sh->state);
3134
3135 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3136 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3137 (unsigned long long)sh->sector, sh->state,
3138 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3139 sh->check_state, sh->reconstruct_state);
3687c061 3140
acfe726b 3141 analyse_stripe(sh, &s);
c5a31000 3142
bc2607f3
N
3143 if (s.handle_bad_blocks) {
3144 set_bit(STRIPE_HANDLE, &sh->state);
3145 goto finish;
3146 }
3147
474af965
N
3148 if (unlikely(s.blocked_rdev)) {
3149 if (s.syncing || s.expanding || s.expanded ||
3150 s.to_write || s.written) {
3151 set_bit(STRIPE_HANDLE, &sh->state);
3152 goto finish;
3153 }
3154 /* There is nothing for the blocked_rdev to block */
3155 rdev_dec_pending(s.blocked_rdev, conf->mddev);
3156 s.blocked_rdev = NULL;
3157 }
3158
3159 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3160 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3161 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3162 }
3163
3164 pr_debug("locked=%d uptodate=%d to_read=%d"
3165 " to_write=%d failed=%d failed_num=%d,%d\n",
3166 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3167 s.failed_num[0], s.failed_num[1]);
3168 /* check if the array has lost more than max_degraded devices and,
3169 * if so, some requests might need to be failed.
3170 */
9a3f530f
N
3171 if (s.failed > conf->max_degraded) {
3172 sh->check_state = 0;
3173 sh->reconstruct_state = 0;
3174 if (s.to_read+s.to_write+s.written)
3175 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3176 if (s.syncing)
3177 handle_failed_sync(conf, sh, &s);
3178 }
474af965
N
3179
3180 /*
3181 * might be able to return some write requests if the parity blocks
3182 * are safe, or on a failed drive
3183 */
3184 pdev = &sh->dev[sh->pd_idx];
3185 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3186 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3187 qdev = &sh->dev[sh->qd_idx];
3188 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3189 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3190 || conf->level < 6;
3191
3192 if (s.written &&
3193 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3194 && !test_bit(R5_LOCKED, &pdev->flags)
3195 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3196 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3197 && !test_bit(R5_LOCKED, &qdev->flags)
3198 && test_bit(R5_UPTODATE, &qdev->flags)))))
3199 handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3200
3201 /* Now we might consider reading some blocks, either to check/generate
3202 * parity, or to satisfy requests
3203 * or to load a block that is being partially written.
3204 */
3205 if (s.to_read || s.non_overwrite
3206 || (conf->level == 6 && s.to_write && s.failed)
3207 || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3208 handle_stripe_fill(sh, &s, disks);
3209
84789554
N
3210 /* Now we check to see if any write operations have recently
3211 * completed
3212 */
3213 prexor = 0;
3214 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3215 prexor = 1;
3216 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3217 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3218 sh->reconstruct_state = reconstruct_state_idle;
3219
3220 /* All the 'written' buffers and the parity block are ready to
3221 * be written back to disk
3222 */
3223 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3224 BUG_ON(sh->qd_idx >= 0 &&
3225 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3226 for (i = disks; i--; ) {
3227 struct r5dev *dev = &sh->dev[i];
3228 if (test_bit(R5_LOCKED, &dev->flags) &&
3229 (i == sh->pd_idx || i == sh->qd_idx ||
3230 dev->written)) {
3231 pr_debug("Writing block %d\n", i);
3232 set_bit(R5_Wantwrite, &dev->flags);
3233 if (prexor)
3234 continue;
3235 if (!test_bit(R5_Insync, &dev->flags) ||
			    ((i == sh->pd_idx || i == sh->qd_idx) &&
			     s.failed == 0))
				set_bit(STRIPE_INSYNC, &sh->state);
			}
		}
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			s.dec_preread_active = 1;
	}

	/* Now to consider new write requests and what else, if anything
	 * should be read.  We do not handle new writes when:
	 * 1/ A 'write' operation (copy+xor) is already in flight.
	 * 2/ A 'check' operation is in flight, as it may clobber the parity
	 *    block.
	 */
	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
		handle_stripe_dirtying(conf, sh, &s, disks);

	/* maybe we need to check and possibly fix the parity for this stripe
	 * Any reads will already have been scheduled, so we just see if enough
	 * data is available.  The parity check is held off while parity
	 * dependent operations are in flight.
	 */
	if (sh->check_state ||
	    (s.syncing && s.locked == 0 &&
	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
	     !test_bit(STRIPE_INSYNC, &sh->state))) {
		if (conf->level == 6)
			handle_parity_checks6(conf, sh, &s, disks);
		else
			handle_parity_checks5(conf, sh, &s, disks);
	}

	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
		clear_bit(STRIPE_SYNCING, &sh->state);
	}

	/* If the failed drives are just a ReadError, then we might need
	 * to progress the repair/check process
	 */
	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
		for (i = 0; i < s.failed; i++) {
			struct r5dev *dev = &sh->dev[s.failed_num[i]];
			if (test_bit(R5_ReadError, &dev->flags)
			    && !test_bit(R5_LOCKED, &dev->flags)
			    && test_bit(R5_UPTODATE, &dev->flags)
				) {
				if (!test_bit(R5_ReWrite, &dev->flags)) {
					set_bit(R5_Wantwrite, &dev->flags);
					set_bit(R5_ReWrite, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				} else {
					/* let's read it back */
					set_bit(R5_Wantread, &dev->flags);
					set_bit(R5_LOCKED, &dev->flags);
					s.locked++;
				}
			}
		}


	/* Finish reconstruct operations initiated by the expansion process */
	if (sh->reconstruct_state == reconstruct_state_result) {
		struct stripe_head *sh_src
			= get_active_stripe(conf, sh->sector, 1, 1, 1);
		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
			/* sh cannot be written until sh_src has been read.
			 * so arrange for sh to be delayed a little
			 */
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh_src->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh_src);
			goto finish;
		}
		if (sh_src)
			release_stripe(sh_src);

		sh->reconstruct_state = reconstruct_state_idle;
		clear_bit(STRIPE_EXPANDING, &sh->state);
		for (i = conf->raid_disks; i--; ) {
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
			set_bit(R5_LOCKED, &sh->dev[i].flags);
			s.locked++;
		}
	}

	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
	    !sh->reconstruct_state) {
		/* Need to write out all blocks after computing parity */
		sh->disks = conf->raid_disks;
		stripe_set_idx(sh->sector, conf, 0, sh);
		schedule_reconstruction(sh, &s, 1, 1);
	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
		clear_bit(STRIPE_EXPAND_READY, &sh->state);
		atomic_dec(&conf->reshape_stripes);
		wake_up(&conf->wait_for_overlap);
		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
	}

	if (s.expanding && s.locked == 0 &&
	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
		handle_stripe_expansion(conf, sh);

finish:
	/* wait for this device to become unblocked */
	if (conf->mddev->external && unlikely(s.blocked_rdev))
		md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);

	if (s.handle_bad_blocks)
		for (i = disks; i--; ) {
			struct md_rdev *rdev;
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
				/* We own a safe reference to the rdev */
				rdev = conf->disks[i].rdev;
				if (!rdev_set_badblocks(rdev, sh->sector,
							STRIPE_SECTORS, 0))
					md_error(conf->mddev, rdev);
				rdev_dec_pending(rdev, conf->mddev);
			}
			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
				rdev = conf->disks[i].rdev;
				rdev_clear_badblocks(rdev, sh->sector,
						     STRIPE_SECTORS);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}

	if (s.ops_request)
		raid_run_ops(sh, s.ops_request);

	ops_run_io(sh, &s);

	if (s.dec_preread_active) {
		/* We delay this until after ops_run_io so that if make_request
		 * is waiting on a flush, it won't continue until the writes
		 * have actually been submitted.
		 */
		atomic_dec(&conf->preread_active_stripes);
		if (atomic_read(&conf->preread_active_stripes) <
		    IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);
	}

	return_io(s.return_bi);

	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
}
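
/*
 * A read error on an otherwise healthy device is repaired in two steps, as
 * the loop above shows: first rewrite the block from redundant data, then
 * read it back to confirm the medium accepted it. Below is a standalone
 * userspace sketch of that sequence; all names in it are invented for
 * illustration and are not part of the driver.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>
#include <stdbool.h>

struct dev_model {
	bool read_error;	/* models R5_ReadError */
	bool rewrite_issued;	/* models R5_ReWrite */
};

static const char *next_repair_step(struct dev_model *dev)
{
	if (!dev->read_error)
		return "nothing to do";
	if (!dev->rewrite_issued) {
		dev->rewrite_issued = true;
		return "rewrite block from redundant data";
	}
	return "read block back to verify";
}

int main(void)
{
	struct dev_model dev = { .read_error = true, .rewrite_issued = false };
	puts(next_repair_step(&dev));	/* rewrite block from redundant data */
	puts(next_repair_step(&dev));	/* read block back to verify */
	return 0;
}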

static void raid5_activate_delayed(struct r5conf *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
		}
	}
}

static void activate_bit_delay(struct r5conf *conf)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		__release_stripe(conf, sh);
	}
}

int md_raid5_congested(struct mddev *mddev, int bits)
{
	struct r5conf *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (conf->inactive_blocked)
		return 1;
	if (conf->quiesce)
		return 1;
	if (list_empty_careful(&conf->inactive_list))
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(md_raid5_congested);

static int raid5_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid5_congested(mddev, bits);
}

/* We want read requests to align with chunks where possible,
 * but write requests don't need to.
 */
static int raid5_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	if ((bvm->bi_rw & 1) == WRITE)
		return biovec->bv_len; /* always allow writes to be mergeable */

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0;
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
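
/*
 * The boundary arithmetic in raid5_mergeable_bvec() is easy to verify in
 * isolation. The standalone userspace sketch below repeats the computation
 * with made-up values; chunk_sectors is a power of two, so the AND-mask
 * yields the offset within the current chunk.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>

typedef unsigned long long sector_t;

/* Bytes that may still be added without crossing a chunk boundary. */
static int bytes_to_chunk_end(sector_t sector, unsigned int chunk_sectors,
			      unsigned int bio_sectors)
{
	long long diff = (long long)chunk_sectors -
		(long long)((sector & (chunk_sectors - 1)) + bio_sectors);
	return diff < 0 ? 0 : (int)(diff << 9);
}

int main(void)
{
	/* 64KiB chunks = 128 sectors; a read starting 120 sectors into the
	 * chunk and already carrying 4 sectors can take 4 more (2048 bytes) */
	printf("%d\n", bytes_to_chunk_end(120, 128, 4));	/* 2048 */
	/* once the boundary is reached, nothing more may be merged */
	printf("%d\n", bytes_to_chunk_end(124, 128, 4));	/* 0 */
	return 0;
}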


static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bio->bi_size >> 9;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;
	return chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}

/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}


static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active stripe count to 1 and the processed
		 * stripe count to zero (upper bits)
		 */
		bi->bi_phys_segments = 1; /* biased count of active stripes */
	}

	return bi;
}
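
/*
 * bi_phys_segments is overloaded here: one integer carries both the count
 * of stripes still referencing the bio and, for retried aligned reads, how
 * many stripe units were already processed. Below is a userspace model of
 * that packing, mirroring the raid5_bi_*_segments() accessors in raid5.h;
 * the helper names in the sketch are invented.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <assert.h>
#include <stdio.h>

static unsigned int phys_segments(unsigned int v) { return v & 0xffff; }
static unsigned int hw_segments(unsigned int v) { return (v >> 16) & 0xffff; }

static unsigned int set_hw_segments(unsigned int v, unsigned int cnt)
{
	return phys_segments(v) | (cnt << 16);
}

int main(void)
{
	unsigned int v = 1;		/* biased count of active stripes */
	v = set_hw_segments(v, 3);	/* three stripe units already done */
	assert(phys_segments(v) == 1);
	assert(hw_segments(v) == 3);
	printf("active=%u processed=%u\n", phys_segments(v), hw_segments(v));
	return 0;
}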


/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed, hand the bio to the retry list so raid5d can
 *  resubmit it through the stripe cache.
 */
static void raid5_align_endio(struct bio *bi, int error)
{
	struct bio *raid_bi = bi->bi_private;
	struct mddev *mddev;
	struct r5conf *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	struct md_rdev *rdev;

	bio_put(bi);

	rdev = (void *)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return;
	}


	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}

static int bio_fits_rdev(struct bio *bi)
{
	struct request_queue *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > queue_max_sectors(q))
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > queue_max_segments(q))
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}


static int chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	struct bio *align_bi;
	struct md_rdev *rdev;
	sector_t end_sector;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *    original bio.
	 */
	align_bi->bi_end_io = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
						   0,
						   &dd_idx, NULL);

	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags) ||
	    rdev->recovery_offset < end_sector) {
		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
		if (rdev &&
		    (test_bit(Faulty, &rdev->flags) ||
		    !(test_bit(In_sync, &rdev->flags) ||
		      rdev->recovery_offset >= end_sector)))
			rdev = NULL;
	}
	if (rdev) {
		sector_t first_bad;
		int bad_sectors;

		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void *)rdev;
		align_bi->bi_bdev = rdev->bdev;
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

		if (!bio_fits_rdev(align_bi) ||
		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
				&first_bad, &bad_sectors)) {
			/* too big in some way, or has a known bad block */
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
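
/*
 * The rdev selection above prefers a replacement device only when it has
 * recovered past the end of the read, and falls back to the original
 * device only if it is in-sync (or recovered) over that range. Below is a
 * userspace sketch of the same rule, with invented types and values.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool present;
	bool faulty;
	bool in_sync;
	unsigned long long recovery_offset;	/* sectors recovered so far */
};

static const struct dev_state *
pick_read_dev(const struct dev_state *replacement,
	      const struct dev_state *orig,
	      unsigned long long end_sector)
{
	if (replacement->present && !replacement->faulty &&
	    replacement->recovery_offset >= end_sector)
		return replacement;
	if (orig->present && !orig->faulty &&
	    (orig->in_sync || orig->recovery_offset >= end_sector))
		return orig;
	return NULL;	/* no safe device: fall back to the stripe cache */
}

int main(void)
{
	struct dev_state repl = { true, false, false, 1000 };
	struct dev_state orig = { true, false, true, 0 };
	/* read ending at sector 2000: the replacement has not recovered far
	 * enough, so the in-sync original device is chosen */
	printf("%s\n", pick_read_dev(&repl, &orig, 2000) == &orig
	       ? "orig" : "repl");
	return 0;
}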

/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set, signifying a
 * stripe with in-flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(&conf->handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(&conf->handle_list)) {
		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {
		sh = list_entry(conf->hold_list.next,
				typeof(*sh), lru);
		conf->bypass_count -= conf->bypass_threshold;
		if (conf->bypass_count < 0)
			conf->bypass_count = 0;
	} else
		return NULL;

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);
	BUG_ON(atomic_read(&sh->count) != 1);
	return sh;
}
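
/*
 * A standalone model of the bypass accounting above (illustrative numbers
 * only): each time the handle list is serviced while the head of the hold
 * list waits unchanged with no I/O started, bypass_count grows; once it
 * exceeds bypass_threshold the hold list is allowed to go next.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int bypass_count = 0, bypass_threshold = 1;	/* BYPASS_THRESHOLD */
	int handle_items = 2;		/* stripes queued on handle_list */
	bool hold_waiting = true;	/* a full-stripe write on hold_list */
	int pending_full_writes = 1;

	for (int pass = 1; pass <= 3; pass++) {
		if (handle_items > 0) {
			handle_items--;
			bypass_count++;	/* hold head unchanged, no I/O yet */
			printf("pass %d: handle list (bypass_count=%d)\n",
			       pass, bypass_count);
		} else if (hold_waiting &&
			   ((bypass_threshold &&
			     bypass_count > bypass_threshold) ||
			    pending_full_writes == 0)) {
			printf("pass %d: hold list promoted\n", pass);
			bypass_count -= bypass_threshold;
			if (bypass_count < 0)
				bypass_count = 0;
		}
	}
	return 0;
}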

static void make_request(struct mddev *mddev, struct bio *bi)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	sector_t new_sector;
	sector_t logical_sector, last_sector;
	struct stripe_head *sh;
	const int rw = bio_data_dir(bi);
	int remaining;
	int plugged;

	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bi);
		return;
	}

	md_write_start(mddev, bi);

	if (rw == READ &&
	     mddev->reshape_position == MaxSector &&
	     chunk_aligned_read(mddev, bi))
		return;

	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	last_sector = bi->bi_sector + (bi->bi_size>>9);
	bi->bi_next = NULL;
	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */

	plugged = mddev_check_plugged(mddev);
	for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
		DEFINE_WAIT(w);
		int disks, data_disks;
		int previous;

	retry:
		previous = 0;
		disks = conf->raid_disks;
		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
		if (unlikely(conf->reshape_progress != MaxSector)) {
			/* spinlock is needed as reshape_progress may be
			 * 64bit on a 32bit platform, and so it might be
			 * possible to see a half-updated value
			 * Of course reshape_progress could change after
			 * the lock is dropped, so once we get a reference
			 * to the stripe that we think it is, we will have
			 * to check again.
			 */
			spin_lock_irq(&conf->device_lock);
			if (mddev->delta_disks < 0
			    ? logical_sector < conf->reshape_progress
			    : logical_sector >= conf->reshape_progress) {
				disks = conf->previous_raid_disks;
				previous = 1;
			} else {
				if (mddev->delta_disks < 0
				    ? logical_sector < conf->reshape_safe
				    : logical_sector >= conf->reshape_safe) {
					spin_unlock_irq(&conf->device_lock);
					schedule();
					goto retry;
				}
			}
			spin_unlock_irq(&conf->device_lock);
		}
		data_disks = disks - conf->max_degraded;

		new_sector = raid5_compute_sector(conf, logical_sector,
						  previous,
						  &dd_idx, NULL);
		pr_debug("raid456: make_request, sector %llu logical %llu\n",
			(unsigned long long)new_sector,
			(unsigned long long)logical_sector);

		sh = get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw&RWA_MASK), 0);
		if (sh) {
			if (unlikely(previous)) {
				/* expansion might have moved on while waiting for a
				 * stripe, so we must do the range check again.
				 * Expansion could still move past after this
				 * test, but as we are holding a reference to
				 * 'sh', we know that if that happens,
				 * STRIPE_EXPANDING will get set and the expansion
				 * won't proceed until we finish with the stripe.
				 */
				int must_retry = 0;
				spin_lock_irq(&conf->device_lock);
				if (mddev->delta_disks < 0
				    ? logical_sector >= conf->reshape_progress
				    : logical_sector < conf->reshape_progress)
					/* mismatch, need to try again */
					must_retry = 1;
				spin_unlock_irq(&conf->device_lock);
				if (must_retry) {
					release_stripe(sh);
					schedule();
					goto retry;
				}
			}

			if (rw == WRITE &&
			    logical_sector >= mddev->suspend_lo &&
			    logical_sector < mddev->suspend_hi) {
				release_stripe(sh);
				/* As the suspend_* range is controlled by
				 * userspace, we want an interruptible
				 * wait.
				 */
				flush_signals(current);
				prepare_to_wait(&conf->wait_for_overlap,
						&w, TASK_INTERRUPTIBLE);
				if (logical_sector >= mddev->suspend_lo &&
				    logical_sector < mddev->suspend_hi)
					schedule();
				goto retry;
			}

			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
			    !add_stripe_bio(sh, bi, dd_idx, rw)) {
				/* Stripe is busy expanding or
				 * add failed due to overlap.  Flush everything
				 * and wait a while
				 */
				md_wakeup_thread(mddev->thread);
				release_stripe(sh);
				schedule();
				goto retry;
			}
			finish_wait(&conf->wait_for_overlap, &w);
			set_bit(STRIPE_HANDLE, &sh->state);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if ((bi->bi_rw & REQ_SYNC) &&
			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			release_stripe(sh);
		} else {
			/* cannot get stripe for read-ahead, just give up */
			clear_bit(BIO_UPTODATE, &bi->bi_flags);
			finish_wait(&conf->wait_for_overlap, &w);
			break;
		}

	}
	if (!plugged)
		md_wakeup_thread(mddev->thread);

	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(bi);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0) {

		if (rw == WRITE)
			md_write_end(mddev);

		bio_endio(bi, 0);
	}
}
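
/*
 * make_request() walks the bio one stripe unit at a time: the start sector
 * is rounded down to a STRIPE_SECTORS boundary and each unit is mapped
 * separately, since consecutive units generally land on different devices.
 * Below is a userspace sketch of that walk, with invented request values.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>

#define STRIPE_SECTORS 8ULL	/* PAGE_SIZE >> 9 on 4K-page systems */

int main(void)
{
	unsigned long long bi_sector = 21;	/* request start, in sectors */
	unsigned long long bi_size = 5120;	/* request length, in bytes */

	unsigned long long logical = bi_sector & ~(STRIPE_SECTORS - 1);
	unsigned long long last = bi_sector + (bi_size >> 9);

	/* sectors 21..30 touch the stripe units starting at 16 and 24 */
	for (; logical < last; logical += STRIPE_SECTORS)
		printf("stripe unit at sector %llu\n", logical);
	return 0;
}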

static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);

static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
	/* reshaping is quite different to recovery/resync so it is
	 * handled quite separately ... here.
	 *
	 * On each call to sync_request, we gather one chunk worth of
	 * destination stripes and flag them as expanding.
	 * Then we find all the source stripes and request reads.
	 * As the reads complete, handle_stripe will copy the data
	 * into the destination stripe and release that stripe.
	 */
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh;
	sector_t first_sector, last_sector;
	int raid_disks = conf->previous_raid_disks;
	int data_disks = raid_disks - conf->max_degraded;
	int new_data_disks = conf->raid_disks - conf->max_degraded;
	int i;
	int dd_idx;
	sector_t writepos, readpos, safepos;
	sector_t stripe_addr;
	int reshape_sectors;
	struct list_head stripes;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->delta_disks < 0 &&
		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
			sector_nr = raid5_size(mddev, 0, 0)
				- conf->reshape_progress;
		} else if (mddev->delta_disks >= 0 &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		sector_div(sector_nr, new_data_disks);
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We need to process a full chunk at a time.
	 * If old and new chunk sizes differ, we need to process the
	 * largest of these
	 */
	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
		reshape_sectors = mddev->new_chunk_sectors;
	else
		reshape_sectors = mddev->chunk_sectors;

	/* we update the metadata when there is more than 3Meg
	 * in the block range (that is rather arbitrary, should
	 * probably be time based) or when the data about to be
	 * copied would over-write the source of the data at
	 * the front of the range.
	 * i.e. one new_stripe along from reshape_progress new_maps
	 * to after where reshape_safe old_maps to
	 */
	writepos = conf->reshape_progress;
	sector_div(writepos, new_data_disks);
	readpos = conf->reshape_progress;
	sector_div(readpos, data_disks);
	safepos = conf->reshape_safe;
	sector_div(safepos, data_disks);
	if (mddev->delta_disks < 0) {
		writepos -= min_t(sector_t, reshape_sectors, writepos);
		readpos += reshape_sectors;
		safepos += reshape_sectors;
	} else {
		writepos += reshape_sectors;
		readpos -= min_t(sector_t, reshape_sectors, readpos);
		safepos -= min_t(sector_t, reshape_sectors, safepos);
	}

	/* 'writepos' is the most advanced device address we might write.
	 * 'readpos' is the least advanced device address we might read.
	 * 'safepos' is the least address recorded in the metadata as having
	 *     been reshaped.
	 * If 'readpos' is behind 'writepos', then there is no way that we can
	 * ensure safety in the face of a crash - that must be done by userspace
	 * making a backup of the data.  So in that case there is no particular
	 * rush to update metadata.
	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
	 * update the metadata to advance 'safepos' to match 'readpos' so that
	 * we can be safe in the event of a crash.
	 * So we insist on updating metadata if safepos is behind writepos and
	 * readpos is beyond writepos.
	 * In any case, update the metadata every 10 seconds.
	 * Maybe that number should be configurable, but I'm not sure it
	 * is worth it.... maybe it could be a multiple of safemode_delay???
	 */
	if ((mddev->delta_disks < 0
	     ? (safepos > writepos && readpos < writepos)
	     : (safepos < writepos && readpos > writepos)) ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes)==0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}

	if (mddev->delta_disks < 0) {
		BUG_ON(conf->reshape_progress == 0);
		stripe_addr = writepos;
		BUG_ON((mddev->dev_sectors &
			~((sector_t)reshape_sectors - 1))
		       - reshape_sectors - stripe_addr
		       != sector_nr);
	} else {
		BUG_ON(writepos != sector_nr + reshape_sectors);
		stripe_addr = sector_nr;
	}
	INIT_LIST_HEAD(&stripes);
	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
		int j;
		int skipped_disk = 0;
		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
		set_bit(STRIPE_EXPANDING, &sh->state);
		atomic_inc(&conf->reshape_stripes);
		/* If any of this stripe is beyond the end of the old
		 * array, then we need to zero those blocks
		 */
		for (j=sh->disks; j--;) {
			sector_t s;
			if (j == sh->pd_idx)
				continue;
			if (conf->level == 6 &&
			    j == sh->qd_idx)
				continue;
			s = compute_blocknr(sh, j, 0);
			if (s < raid5_size(mddev, 0, 0)) {
				skipped_disk = 1;
				continue;
			}
			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
			set_bit(R5_Expanded, &sh->dev[j].flags);
			set_bit(R5_UPTODATE, &sh->dev[j].flags);
		}
		if (!skipped_disk) {
			set_bit(STRIPE_EXPAND_READY, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
		list_add(&sh->lru, &stripes);
	}
	spin_lock_irq(&conf->device_lock);
	if (mddev->delta_disks < 0)
		conf->reshape_progress -= reshape_sectors * new_data_disks;
	else
		conf->reshape_progress += reshape_sectors * new_data_disks;
	spin_unlock_irq(&conf->device_lock);
	/* Ok, those stripes are ready. We can start scheduling
	 * reads on the source stripes.
	 * The source stripes are determined by mapping the first and last
	 * block on the destination stripes.
	 */
	first_sector =
		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
				     1, &dd_idx, NULL);
	last_sector =
		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
					    * new_data_disks - 1),
				     1, &dd_idx, NULL);
	if (last_sector >= mddev->dev_sectors)
		last_sector = mddev->dev_sectors - 1;
	while (first_sector <= last_sector) {
		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
		set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
		first_sector += STRIPE_SECTORS;
	}
	/* Now that the sources are clearly marked, we can release
	 * the destination stripes
	 */
	while (!list_empty(&stripes)) {
		sh = list_entry(stripes.next, struct stripe_head, lru);
		list_del_init(&sh->lru);
		release_stripe(sh);
	}
	/* If this takes us to the resync_max point where we have to pause,
	 * then we need to write out the superblock.
	 */
	sector_nr += reshape_sectors;
	if ((sector_nr - mddev->curr_resync_completed) * 2
	    >= mddev->resync_max - mddev->curr_resync_completed) {
		/* Cannot proceed until we've updated the superblock... */
		wait_event(conf->wait_for_overlap,
			   atomic_read(&conf->reshape_stripes) == 0);
		mddev->reshape_position = conf->reshape_progress;
		mddev->curr_resync_completed = sector_nr;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
			   || kthread_should_stop());
		spin_lock_irq(&conf->device_lock);
		conf->reshape_safe = mddev->reshape_position;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	return reshape_sectors;
}
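
/*
 * The writepos/readpos/safepos rule above decides when a metadata
 * checkpoint must happen. Below is a standalone illustration for a growing
 * array (delta_disks > 0) with invented numbers; a checkpoint is forced
 * when the copy would overwrite data whose progress is not yet recorded.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned long long reshape_progress = 6144;	/* array sectors done */
	unsigned long long reshape_safe = 5120;	/* recorded in metadata */
	unsigned long long reshape_sectors = 128;	/* one chunk of work */
	unsigned long long new_data_disks = 4, old_data_disks = 3;

	unsigned long long writepos =
		reshape_progress / new_data_disks + reshape_sectors;
	unsigned long long readpos =
		reshape_progress / old_data_disks - reshape_sectors;
	unsigned long long safepos =
		reshape_safe / old_data_disks - reshape_sectors;

	bool must_checkpoint = safepos < writepos && readpos > writepos;
	printf("writepos=%llu readpos=%llu safepos=%llu -> %s\n",
	       writepos, readpos, safepos,
	       must_checkpoint ? "update metadata first" : "keep copying");
	return 0;
}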

/* FIXME go_faster isn't used */
static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh;
	sector_t max_sector = mddev->dev_sectors;
	sector_t sync_blocks;
	int still_degraded = 0;
	int i;

	if (sector_nr >= max_sector) {
		/* just being told to finish up .. nothing much to do */

		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;
		bitmap_close_sync(mddev->bitmap);

		return 0;
	}

	/* Allow raid5_quiesce to complete */
	wait_event(conf->wait_for_overlap, conf->quiesce != 2);

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	/* No need to check resync_max as we never do more than one
	 * stripe, and as resync_max will always be on a chunk boundary,
	 * if the check in md_do_sync didn't fire, there is no chance
	 * of overstepping resync_max here
	 */

	/* if there are too many failed drives and we are trying
	 * to resync, then assert that we are finished, because there is
	 * nothing we can do.
	 */
	if (mddev->degraded >= conf->max_degraded &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		sector_t rv = mddev->dev_sectors - sector_nr;
		*skipped = 1;
		return rv;
	}
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
		/* we can skip this block, and probably more */
		sync_blocks /= STRIPE_SECTORS;
		*skipped = 1;
		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
	}


	bitmap_cond_end_sync(mddev->bitmap, sector_nr);

	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
	if (sh == NULL) {
		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
		/* make sure we don't swamp the stripe cache if someone else
		 * is trying to get access
		 */
		schedule_timeout_uninterruptible(1);
	}
	/* Need to check if array will still be degraded after recovery/resync
	 * We don't need to check the 'failed' flag as when that gets set,
	 * recovery aborts.
	 */
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->disks[i].rdev == NULL)
			still_degraded = 1;

	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);

	handle_stripe(sh);
	release_stripe(sh);

	return STRIPE_SECTORS;
}
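
/*
 * The bitmap skip path above rounds the clean extent down to whole stripe
 * units so resync never stops mid-stripe. A quick standalone check of that
 * arithmetic with made-up numbers:
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>

#define STRIPE_SECTORS 8ULL

int main(void)
{
	unsigned long long sync_blocks = 1000;	/* clean sectors per bitmap */
	sync_blocks /= STRIPE_SECTORS;		/* 125 whole stripe units */
	printf("skip %llu sectors\n", sync_blocks * STRIPE_SECTORS); /* 1000 */

	sync_blocks = 1003;			/* not stripe-aligned */
	sync_blocks /= STRIPE_SECTORS;
	printf("skip %llu sectors\n", sync_blocks * STRIPE_SECTORS); /* 1000 */
	return 0;
}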

static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
{
	/* We may not be able to submit a whole bio at once as there
	 * may not be enough stripe_heads available.
	 * We cannot pre-allocate enough stripe_heads as we may need
	 * more than exist in the cache (if we allow ever larger chunks).
	 * So we do one stripe head at a time and record in
	 * ->bi_hw_segments how many have been done.
	 *
	 * We *know* that this entire raid_bio is in one chunk, so
	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
	 */
	struct stripe_head *sh;
	int dd_idx;
	sector_t sector, logical_sector, last_sector;
	int scnt = 0;
	int remaining;
	int handled = 0;

	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
	sector = raid5_compute_sector(conf, logical_sector,
				      0, &dd_idx, NULL);
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
	     logical_sector += STRIPE_SECTORS,
		     sector += STRIPE_SECTORS,
		     scnt++) {

		if (scnt < raid5_bi_hw_segments(raid_bio))
			/* already done this stripe */
			continue;

		sh = get_active_stripe(conf, sector, 0, 1, 0);

		if (!sh) {
			/* failed to get a stripe - must wait */
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
			release_stripe(sh);
			raid5_set_bi_hw_segments(raid_bio, scnt);
			conf->retry_read_aligned = raid_bio;
			return handled;
		}

		handle_stripe(sh);
		release_stripe(sh);
		handled++;
	}
	spin_lock_irq(&conf->device_lock);
	remaining = raid5_dec_bi_phys_segments(raid_bio);
	spin_unlock_irq(&conf->device_lock);
	if (remaining == 0)
		bio_endio(raid_bio, 0);
	if (atomic_dec_and_test(&conf->active_aligned_reads))
		wake_up(&conf->wait_for_stripe);
	return handled;
}


/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d(struct mddev *mddev)
{
	struct stripe_head *sh;
	struct r5conf *conf = mddev->private;
	int handled;
	struct blk_plug plug;

	pr_debug("+++ raid5d active\n");

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		struct bio *bio;

		if (atomic_read(&mddev->plug_cnt) == 0 &&
		    !list_empty(&conf->bitmap_list)) {
			/* Now is a good time to flush some bitmap updates */
			conf->seq_flush++;
			spin_unlock_irq(&conf->device_lock);
			bitmap_unplug(mddev->bitmap);
			spin_lock_irq(&conf->device_lock);
			conf->seq_write = conf->seq_flush;
			activate_bit_delay(conf);
		}
		if (atomic_read(&mddev->plug_cnt) == 0)
			raid5_activate_delayed(conf);

		while ((bio = remove_bio_from_retry(conf))) {
			int ok;
			spin_unlock_irq(&conf->device_lock);
			ok = retry_aligned_read(conf, bio);
			spin_lock_irq(&conf->device_lock);
			if (!ok)
				break;
			handled++;
		}

		sh = __get_priority_stripe(conf);

		if (!sh)
			break;
		spin_unlock_irq(&conf->device_lock);

		handled++;
		handle_stripe(sh);
		release_stripe(sh);
		cond_resched();

		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);

		spin_lock_irq(&conf->device_lock);
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);

	async_tx_issue_pending_all();
	blk_finish_plug(&plug);

	pr_debug("--- raid5d inactive\n");
}

static ssize_t
raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->max_nr_stripes);
	else
		return 0;
}

int
raid5_set_cache_size(struct mddev *mddev, int size)
{
	struct r5conf *conf = mddev->private;
	int err;

	if (size <= 16 || size > 32768)
		return -EINVAL;
	while (size < conf->max_nr_stripes) {
		if (drop_one_stripe(conf))
			conf->max_nr_stripes--;
		else
			break;
	}
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (size > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
		else
			break;
	}
	return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
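
/*
 * The attribute registered below appears as md/stripe_cache_size in the
 * array's sysfs directory. A minimal userspace sketch of resizing the
 * cache through it; the md0 path is an example - adjust for your array.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/stripe_cache_size", "w");
	if (!f) {
		perror("stripe_cache_size");
		return 1;
	}
	/* raid5_set_cache_size() accepts values from 17 to 32768 */
	fprintf(f, "%d\n", 4096);
	fclose(f);
	return 0;
}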

static ssize_t
raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	err = raid5_set_cache_size(mddev, new);
	if (err)
		return err;
	return len;
}

static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
				raid5_show_stripe_cache_size,
				raid5_store_stripe_cache_size);

static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", conf->bypass_threshold);
	else
		return 0;
}

static ssize_t
raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
{
	struct r5conf *conf = mddev->private;
	unsigned long new;
	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
		return -ENODEV;

	if (strict_strtoul(page, 10, &new))
		return -EINVAL;
	if (new > conf->max_nr_stripes)
		return -EINVAL;
	conf->bypass_threshold = new;
	return len;
}

static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
					S_IRUGO | S_IWUSR,
					raid5_show_preread_threshold,
					raid5_store_preread_threshold);

static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	if (conf)
		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
	else
		return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] =  {
	&raid5_stripecache_size.attr,
	&raid5_stripecache_active.attr,
	&raid5_preread_bypass_threshold.attr,
	NULL,
};
static struct attribute_group raid5_attrs_group = {
	.name = NULL,
	.attrs = raid5_attrs,
};

static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	struct r5conf *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
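
/*
 * Worked example of the capacity rule in raid5_size(), with invented
 * numbers: per-device sectors are truncated to a multiple of both the old
 * and the new chunk size, then multiplied by the number of data disks.
 */
/* Standalone userspace sketch - not part of raid5.c */
#include <stdio.h>

int main(void)
{
	unsigned long long dev_sectors = 1000000;
	unsigned long long chunk = 128, new_chunk = 128;	/* 64KiB chunks */
	int raid_disks = 5, max_degraded = 1;			/* RAID5 */

	dev_sectors &= ~(chunk - 1);		/* 999936 */
	dev_sectors &= ~(new_chunk - 1);
	printf("array sectors: %llu\n",
	       dev_sectors * (raid_disks - max_degraded));	/* 3999744 */
	return 0;
}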

static void raid5_free_percpu(struct r5conf *conf)
{
	struct raid5_percpu *percpu;
	unsigned long cpu;

	if (!conf->percpu)
		return;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		percpu = per_cpu_ptr(conf->percpu, cpu);
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
	}
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	free_percpu(conf->percpu);
}

static void free_conf(struct r5conf *conf)
{
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}

#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (conf->level == 6 && !percpu->spare_page)
			percpu->spare_page = alloc_page(GFP_KERNEL);
		if (!percpu->scribble)
			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

		if (!percpu->scribble ||
		    (conf->level == 6 && !percpu->spare_page)) {
			safe_put_page(percpu->spare_page);
			kfree(percpu->scribble);
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		safe_put_page(percpu->spare_page);
		kfree(percpu->scribble);
		percpu->spare_page = NULL;
		percpu->scribble = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif

static int raid5_alloc_percpu(struct r5conf *conf)
{
	unsigned long cpu;
	struct page *spare_page;
	struct raid5_percpu __percpu *allcpus;
	void *scribble;
	int err;

	allcpus = alloc_percpu(struct raid5_percpu);
	if (!allcpus)
		return -ENOMEM;
	conf->percpu = allcpus;

	get_online_cpus();
	err = 0;
	for_each_present_cpu(cpu) {
		if (conf->level == 6) {
			spare_page = alloc_page(GFP_KERNEL);
			if (!spare_page) {
				err = -ENOMEM;
				break;
			}
			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
		}
		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
		if (!scribble) {
			err = -ENOMEM;
			break;
		}
		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
	}
#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	if (err == 0)
		err = register_cpu_notifier(&conf->cpu_notify);
#endif
	put_online_cpus();

	return err;
}

static struct r5conf *setup_conf(struct mddev *mddev)
{
	struct r5conf *conf;
	int raid_disk, memory, max_disks;
	struct md_rdev *rdev;
	struct disk_info *disk;

	if (mddev->new_level != 5
	    && mddev->new_level != 4
	    && mddev->new_level != 6) {
		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
		       mdname(mddev), mddev->new_level);
		return ERR_PTR(-EIO);
	}
	if ((mddev->new_level == 5
	     && !algorithm_valid_raid5(mddev->new_layout)) ||
	    (mddev->new_level == 6
	     && !algorithm_valid_raid6(mddev->new_layout))) {
		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
		       mdname(mddev), mddev->new_layout);
		return ERR_PTR(-EIO);
	}
	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
		       mdname(mddev), mddev->raid_disks);
		return ERR_PTR(-EINVAL);
	}

	if (!mddev->new_chunk_sectors ||
	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
		       mdname(mddev), mddev->new_chunk_sectors << 9);
		return ERR_PTR(-EINVAL);
	}

	conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
	if (conf == NULL)
		goto abort;
	spin_lock_init(&conf->device_lock);
	init_waitqueue_head(&conf->wait_for_stripe);
	init_waitqueue_head(&conf->wait_for_overlap);
	INIT_LIST_HEAD(&conf->handle_list);
	INIT_LIST_HEAD(&conf->hold_list);
	INIT_LIST_HEAD(&conf->delayed_list);
	INIT_LIST_HEAD(&conf->bitmap_list);
	INIT_LIST_HEAD(&conf->inactive_list);
	atomic_set(&conf->active_stripes, 0);
	atomic_set(&conf->preread_active_stripes, 0);
	atomic_set(&conf->active_aligned_reads, 0);
	conf->bypass_threshold = BYPASS_THRESHOLD;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	conf->raid_disks = mddev->raid_disks;
	if (mddev->reshape_position == MaxSector)
		conf->previous_raid_disks = mddev->raid_disks;
	else
		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
	conf->scribble_len = scribble_len(max_disks);

	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
			      GFP_KERNEL);
	if (!conf->disks)
		goto abort;

	conf->mddev = mddev;

	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
		goto abort;

	conf->level = mddev->new_level;
	if (raid5_alloc_percpu(conf) != 0)
		goto abort;

	pr_debug("raid456: run(%s) called.\n", mdname(mddev));

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		raid_disk = rdev->raid_disk;
		if (raid_disk >= max_disks
		    || raid_disk < 0)
			continue;
		disk = conf->disks + raid_disk;

		disk->rdev = rdev;

		if (test_bit(In_sync, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
			       " disk %d\n",
			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
		} else if (rdev->saved_raid_disk != raid_disk)
			/* Cannot rely on bitmap to complete recovery */
			conf->fullsync = 1;
	}

	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->level = mddev->new_level;
	if (conf->level == 6)
		conf->max_degraded = 2;
	else
		conf->max_degraded = 1;
	conf->algorithm = mddev->new_layout;
	conf->max_nr_stripes = NR_STRIPES;
	conf->reshape_progress = mddev->reshape_position;
	if (conf->reshape_progress != MaxSector) {
		conf->prev_chunk_sectors = mddev->chunk_sectors;
		conf->prev_algo = mddev->layout;
	}

	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
	if (grow_stripes(conf, conf->max_nr_stripes)) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
		       mdname(mddev), memory);
		goto abort;
	} else
		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
		       mdname(mddev), memory);

	conf->thread = md_register_thread(raid5d, mddev, NULL);
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid:%s: couldn't allocate thread.\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		free_conf(conf);
		return ERR_PTR(-EIO);
	} else
		return ERR_PTR(-ENOMEM);
}


static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}

fd01b88c 4732static int run(struct mddev *mddev)
91adb564 4733{
d1688a6d 4734 struct r5conf *conf;
9f7c2220 4735 int working_disks = 0;
c148ffdc 4736 int dirty_parity_disks = 0;
3cb03002 4737 struct md_rdev *rdev;
c148ffdc 4738 sector_t reshape_offset = 0;
91adb564 4739
8c6ac868 4740 if (mddev->recovery_cp != MaxSector)
0c55e022 4741 printk(KERN_NOTICE "md/raid:%s: not clean"
8c6ac868
AN
4742 " -- starting background reconstruction\n",
4743 mdname(mddev));
91adb564
N
4744 if (mddev->reshape_position != MaxSector) {
4745 /* Check that we can continue the reshape.
4746 * Currently only disks can change, it must
4747 * increase, and we must be past the point where
4748 * a stripe over-writes itself
4749 */
4750 sector_t here_new, here_old;
4751 int old_disks;
18b00334 4752 int max_degraded = (mddev->level == 6 ? 2 : 1);
91adb564 4753
88ce4930 4754 if (mddev->new_level != mddev->level) {
0c55e022 4755 printk(KERN_ERR "md/raid:%s: unsupported reshape "
91adb564
N
4756 "required - aborting.\n",
4757 mdname(mddev));
4758 return -EINVAL;
4759 }
91adb564
N
4760 old_disks = mddev->raid_disks - mddev->delta_disks;
4761 /* reshape_position must be on a new-stripe boundary, and one
4762 * further up in new geometry must map after here in old
4763 * geometry.
4764 */
4765 here_new = mddev->reshape_position;
664e7c41 4766 if (sector_div(here_new, mddev->new_chunk_sectors *
91adb564 4767 (mddev->raid_disks - max_degraded))) {
0c55e022
N
4768 printk(KERN_ERR "md/raid:%s: reshape_position not "
4769 "on a stripe boundary\n", mdname(mddev));
91adb564
N
4770 return -EINVAL;
4771 }
c148ffdc 4772 reshape_offset = here_new * mddev->new_chunk_sectors;
91adb564
N
4773 /* here_new is the stripe we will write to */
4774 here_old = mddev->reshape_position;
9d8f0363 4775 sector_div(here_old, mddev->chunk_sectors *
91adb564
N
4776 (old_disks-max_degraded));
4777 /* here_old is the first stripe that we might need to read
4778 * from */
67ac6011
N
4779 if (mddev->delta_disks == 0) {
4780 /* We cannot be sure it is safe to start an in-place
4781 * reshape. It is only safe if user-space if monitoring
4782 * and taking constant backups.
4783 * mdadm always starts a situation like this in
4784 * readonly mode so it can take control before
4785 * allowing any writes. So just check for that.
4786 */
4787 if ((here_new * mddev->new_chunk_sectors !=
4788 here_old * mddev->chunk_sectors) ||
4789 mddev->ro == 0) {
0c55e022
N
4790 printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
4791 " in read-only mode - aborting\n",
4792 mdname(mddev));
67ac6011
N
4793 return -EINVAL;
4794 }
4795 } else if (mddev->delta_disks < 0
4796 ? (here_new * mddev->new_chunk_sectors <=
4797 here_old * mddev->chunk_sectors)
4798 : (here_new * mddev->new_chunk_sectors >=
4799 here_old * mddev->chunk_sectors)) {
91adb564 4800 /* Reading from the same stripe as writing to - bad */
0c55e022
N
4801 printk(KERN_ERR "md/raid:%s: reshape_position too early for "
4802 "auto-recovery - aborting.\n",
4803 mdname(mddev));
91adb564
N
4804 return -EINVAL;
4805 }
0c55e022
N
4806 printk(KERN_INFO "md/raid:%s: reshape will continue\n",
4807 mdname(mddev));
91adb564
N
4808 /* OK, we should be able to continue; */
4809 } else {
4810 BUG_ON(mddev->level != mddev->new_level);
4811 BUG_ON(mddev->layout != mddev->new_layout);
664e7c41 4812 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
91adb564 4813 BUG_ON(mddev->delta_disks != 0);
1da177e4 4814 }
91adb564 4815
245f46c2
N
4816 if (mddev->private == NULL)
4817 conf = setup_conf(mddev);
4818 else
4819 conf = mddev->private;
4820
91adb564
N
4821 if (IS_ERR(conf))
4822 return PTR_ERR(conf);
4823
4824 mddev->thread = conf->thread;
4825 conf->thread = NULL;
4826 mddev->private = conf;
4827
4828 /*
4829 * 0 for a fully functional array, 1 or 2 for a degraded array.
4830 */
c148ffdc
N
4831 list_for_each_entry(rdev, &mddev->disks, same_set) {
4832 if (rdev->raid_disk < 0)
4833 continue;
2f115882 4834 if (test_bit(In_sync, &rdev->flags)) {
91adb564 4835 working_disks++;
2f115882
N
4836 continue;
4837 }
c148ffdc
N
4838 /* This disc is not fully in-sync. However if it
4839 * just stored parity (beyond the recovery_offset),
4840 * when we don't need to be concerned about the
4841 * array being dirty.
4842 * When reshape goes 'backwards', we never have
4843 * partially completed devices, so we only need
4844 * to worry about reshape going forwards.
4845 */
4846 /* Hack because v0.91 doesn't store recovery_offset properly. */
4847 if (mddev->major_version == 0 &&
4848 mddev->minor_version > 90)
4849 rdev->recovery_offset = reshape_offset;
4850
c148ffdc
N
4851 if (rdev->recovery_offset < reshape_offset) {
4852 /* We need to check old and new layout */
4853 if (!only_parity(rdev->raid_disk,
4854 conf->algorithm,
4855 conf->raid_disks,
4856 conf->max_degraded))
4857 continue;
4858 }
4859 if (!only_parity(rdev->raid_disk,
4860 conf->prev_algo,
4861 conf->previous_raid_disks,
4862 conf->max_degraded))
4863 continue;
4864 dirty_parity_disks++;
4865 }
91adb564 4866
908f4fbd 4867 mddev->degraded = calc_degraded(conf);
91adb564 4868
674806d6 4869 if (has_failed(conf)) {
0c55e022 4870 printk(KERN_ERR "md/raid:%s: not enough operational devices"
1da177e4 4871 " (%d/%d failed)\n",
02c2de8c 4872 mdname(mddev), mddev->degraded, conf->raid_disks);
1da177e4
LT
4873 goto abort;
4874 }
4875
91adb564 4876 /* device size must be a multiple of chunk size */
9d8f0363 4877 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
91adb564
N
4878 mddev->resync_max_sectors = mddev->dev_sectors;
4879
c148ffdc 4880 if (mddev->degraded > dirty_parity_disks &&
1da177e4 4881 mddev->recovery_cp != MaxSector) {
6ff8d8ec
N
4882 if (mddev->ok_start_degraded)
4883 printk(KERN_WARNING
0c55e022
N
4884 "md/raid:%s: starting dirty degraded array"
4885 " - data corruption possible.\n",
6ff8d8ec
N
4886 mdname(mddev));
4887 else {
4888 printk(KERN_ERR
0c55e022 4889 "md/raid:%s: cannot start dirty degraded array.\n",
6ff8d8ec
N
4890 mdname(mddev));
4891 goto abort;
4892 }
1da177e4
LT
4893 }
4894
1da177e4 4895 if (mddev->degraded == 0)
0c55e022
N
4896 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
4897 " devices, algorithm %d\n", mdname(mddev), conf->level,
e183eaed
N
4898 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4899 mddev->new_layout);
1da177e4 4900 else
0c55e022
N
4901 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
4902 " out of %d devices, algorithm %d\n",
4903 mdname(mddev), conf->level,
4904 mddev->raid_disks - mddev->degraded,
4905 mddev->raid_disks, mddev->new_layout);
1da177e4
LT
4906
4907 print_raid5_conf(conf);
4908
fef9c61f 4909 if (conf->reshape_progress != MaxSector) {
fef9c61f 4910 conf->reshape_safe = conf->reshape_progress;
f6705578
N
4911 atomic_set(&conf->reshape_stripes, 0);
4912 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4913 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4914 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4915 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4916 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
0da3c619 4917 "reshape");
f6705578
N
4918 }
4919
1da177e4
LT
4920
	/* Ok, everything is just fine now */
	if (mddev->to_remove == &raid5_attrs_group)
		mddev->to_remove = NULL;
	else if (mddev->kobj.sd &&
		 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
		printk(KERN_WARNING
		       "raid5: failed to create sysfs attributes for %s\n",
		       mdname(mddev));
	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));

	if (mddev->queue) {
		int chunk_size;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (n - max_degraded) * chunksize, where 'n' is
		 * the number of raid devices
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
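
		/*
		 * Worked example (illustration): a 6-drive RAID6 with 64K
		 * chunks has data_disks = 4, so stripe = 4 * (65536 / 4096)
		 * = 64 pages and read-ahead is raised to at least 128 pages
		 * (512K), i.e. two full stripes.
		 */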

		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);

		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = raid5_congested;

		chunk_size = mddev->chunk_sectors << 9;
		blk_queue_io_min(mddev->queue, chunk_size);
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks - conf->max_degraded));

		list_for_each_entry(rdev, &mddev->disks, same_set)
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
	}

	return 0;
abort:
	md_unregister_thread(&mddev->thread);
	print_raid5_conf(conf);
	free_conf(conf);
	mddev->private = NULL;
	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
	return -EIO;
}

static int stop(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		mddev->queue->backing_dev_info.congested_fn = NULL;
	free_conf(conf);
	mddev->private = NULL;
	mddev->to_remove = &raid5_attrs_group;
	return 0;
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		   mddev->chunk_sectors / 2, mddev->layout);
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			   conf->disks[i].rdev &&
			   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}

static void print_raid5_conf(struct r5conf *conf)
{
	int i;
	struct disk_info *tmp;

	printk(KERN_DEBUG "RAID conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}

static int raid5_spare_active(struct mddev *mddev)
{
	int i;
	struct r5conf *conf = mddev->private;
	struct disk_info *tmp;
	int count = 0;
	unsigned long flags;

	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->disks + i;
		if (tmp->rdev
		    && tmp->rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	print_raid5_conf(conf);
	return count;
}

static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct disk_info *p = conf->disks + number;

	print_raid5_conf(conf);
	if (rdev == p->rdev) {
		if (number >= conf->raid_disks &&
		    conf->reshape_progress == MaxSector)
			clear_bit(In_sync, &rdev->flags);

		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * isn't possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    !has_failed(conf) &&
		    number < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_raid5_conf(conf);
	return err;
}

static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r5conf *conf = mddev->private;
	int err = -EEXIST;
	int disk;
	struct disk_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (has_failed(conf))
		/* no point adding a device */
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	/*
	 * find the disk ... but prefer rdev->saved_raid_disk
	 * if possible.
	 */
	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
		disk = rdev->saved_raid_disk;
	else
		disk = first;
	for ( ; disk <= last ; disk++)
		if ((p = conf->disks + disk)->rdev == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->raid_disk = disk;
			err = 0;
			if (rdev->saved_raid_disk != disk)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
	print_raid5_conf(conf);
	return err;
}

static int raid5_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
					       mddev->raid_disks));
	if (mddev->array_sectors >
	    raid5_size(mddev, sectors, mddev->raid_disks))
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int check_stripe_cache(struct mddev *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	struct r5conf *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->max_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}

static int check_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (mddev->delta_disks == 0 &&
	    mddev->new_layout == mddev->layout &&
	    mddev->new_chunk_sectors == mddev->chunk_sectors)
		return 0; /* nothing to do */
	if (mddev->bitmap)
		/* Cannot grow a bitmap yet */
		return -EBUSY;
	if (has_failed(conf))
		return -EINVAL;
	if (mddev->delta_disks < 0) {
		/* We might be able to shrink, but the devices must
		 * be made bigger first.
		 * For raid6, 4 is the minimum number of devices;
		 * otherwise 2 is the minimum.
		 */
		int min = 2;
		if (mddev->level == 6)
			min = 4;
		if (mddev->raid_disks + mddev->delta_disks < min)
			return -EINVAL;
	}

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}

static int raid5_start_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	unsigned long flags;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (!check_stripe_cache(mddev))
		return -ENOSPC;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;

	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
		/* Not enough devices even to make a degraded array
		 * of that size
		 */
		return -EINVAL;

	/* Refuse to reduce size of the array.  Any reductions in
	 * array size must be through explicit setting of array_size
	 * attribute.
	 */
	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
	    < mddev->array_sectors) {
		printk(KERN_ERR "md/raid:%s: array size must be reduced "
		       "before number of disks\n", mdname(mddev));
		return -EINVAL;
	}

	atomic_set(&conf->reshape_stripes, 0);
	spin_lock_irq(&conf->device_lock);
	conf->previous_raid_disks = conf->raid_disks;
	conf->raid_disks += mddev->delta_disks;
	conf->prev_chunk_sectors = conf->chunk_sectors;
	conf->chunk_sectors = mddev->new_chunk_sectors;
	conf->prev_algo = conf->algorithm;
	conf->algorithm = mddev->new_layout;
	if (mddev->delta_disks < 0)
		conf->reshape_progress = raid5_size(mddev, 0, 0);
	else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	conf->generation++;
	spin_unlock_irq(&conf->device_lock);

	/* Add some new drives, as many as will fit.
	 * We know there are enough to make the newly sized array work.
	 * Don't add devices if we are reducing the number of
	 * devices in the array.  This is because it is not possible
	 * to correctly record the "partially reconstructed" state of
	 * such devices during the reshape and confusion could result.
	 */
	if (mddev->delta_disks >= 0) {
		int added_devices = 0;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid5_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk
					    >= conf->previous_raid_disks) {
						set_bit(In_sync, &rdev->flags);
						added_devices++;
					} else
						rdev->recovery_offset = 0;

					if (sysfs_link_rdev(mddev, rdev))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->previous_raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
				added_devices++;
			}

		/* When a reshape changes the number of devices,
		 * ->degraded is measured against the larger of the
		 * pre and post number of devices.
		 */
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded = calc_degraded(conf);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		mddev->recovery = 0;
		spin_lock_irq(&conf->device_lock);
		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		return -EAGAIN;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;
}

/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(struct r5conf *conf)
{

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		conf->reshape_progress = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (n - max_degraded) * chunksize, where 'n' is the
		 * number of raid devices
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks ;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev &&
				    raid5_remove_disk(mddev, rdev) == 0) {
					sysfs_unlink_rdev(mddev, rdev);
					rdev->raid_disk = -1;
				}
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
	}
}

static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		spin_lock_irq(&conf->device_lock);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_lock_irq(conf->wait_for_stripe,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    conf->device_lock, /* nothing */);
		conf->quiesce = 1;
		spin_unlock_irq(&conf->device_lock);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		spin_lock_irq(&conf->device_lock);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_stripe);
		wake_up(&conf->wait_for_overlap);
		spin_unlock_irq(&conf->device_lock);
		break;
	}
}

static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}

static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}

static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}

static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}

static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}

static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
};
static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
};

static int __init raid5_init(void)
{
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");