[PATCH] md: Core of raid5 resize process
[deliverable/linux.git] / drivers / md / raid5.c
1da177e4
LT
1/*
2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
5 *
6 * RAID-5 management functions.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * You should have received a copy of the GNU General Public License
14 * (for example /usr/src/linux/COPYING); if not, write to the Free
15 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16 */
17
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/raid/raid5.h>
23#include <linux/highmem.h>
24#include <linux/bitops.h>
25#include <asm/atomic.h>
26
72626685
N
27#include <linux/raid/bitmap.h>
28
1da177e4
LT
29/*
30 * Stripe cache
31 */
32
33#define NR_STRIPES 256
34#define STRIPE_SIZE PAGE_SIZE
35#define STRIPE_SHIFT (PAGE_SHIFT - 9)
36#define STRIPE_SECTORS (STRIPE_SIZE>>9)
37#define IO_THRESHOLD 1
fccddba0 38#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
1da177e4
LT
39#define HASH_MASK (NR_HASH - 1)
40
fccddba0 41#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
1da177e4
LT
42
43/* bio's attached to a stripe+device for I/O are linked together in bi_sector
44 * order without overlap. There may be several bio's per stripe+device, and
45 * a bio could span several devices.
46 * When walking this list for a particular stripe+device, we must never proceed
47 * beyond a bio that extends past this device, as the next bio might no longer
48 * be valid.
49 * This macro is used to determine the 'next' bio in the list, given the sector
50 * of the current stripe+device
51 */
52#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
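/*
 * Illustrative example (editor's addition, not in the original source):
 * with STRIPE_SECTORS == 8 and a stripe+device starting at sector 16, a
 * bio at sector 16 covering 4 sectors ends before sector 24, so
 * r5_next_bio() follows bi_next; a bio reaching sector 24 or beyond may
 * extend onto another device, so r5_next_bio() returns NULL and the walk
 * stops there.
 */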
53/*
54 * The following can be used to debug the driver
55 */
56#define RAID5_DEBUG 0
57#define RAID5_PARANOIA 1
58#if RAID5_PARANOIA && defined(CONFIG_SMP)
59# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
60#else
61# define CHECK_DEVLOCK()
62#endif
63
64#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
65#if RAID5_DEBUG
66#define inline
67#define __inline__
68#endif
69
70static void print_raid5_conf (raid5_conf_t *conf);
71
858119e1 72static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
1da177e4
LT
73{
74 if (atomic_dec_and_test(&sh->count)) {
75 if (!list_empty(&sh->lru))
76 BUG();
77 if (atomic_read(&conf->active_stripes)==0)
78 BUG();
79 if (test_bit(STRIPE_HANDLE, &sh->state)) {
80 if (test_bit(STRIPE_DELAYED, &sh->state))
81 list_add_tail(&sh->lru, &conf->delayed_list);
72626685
N
82 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
83 conf->seq_write == sh->bm_seq)
84 list_add_tail(&sh->lru, &conf->bitmap_list);
85 else {
86 clear_bit(STRIPE_BIT_DELAY, &sh->state);
1da177e4 87 list_add_tail(&sh->lru, &conf->handle_list);
72626685 88 }
1da177e4
LT
89 md_wakeup_thread(conf->mddev->thread);
90 } else {
91 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
92 atomic_dec(&conf->preread_active_stripes);
93 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
94 md_wakeup_thread(conf->mddev->thread);
95 }
1da177e4 96 atomic_dec(&conf->active_stripes);
ccfcc3c1
N
97 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
98 list_add_tail(&sh->lru, &conf->inactive_list);
1da177e4 99 wake_up(&conf->wait_for_stripe);
ccfcc3c1 100 }
1da177e4
LT
101 }
102 }
103}
104static void release_stripe(struct stripe_head *sh)
105{
106 raid5_conf_t *conf = sh->raid_conf;
107 unsigned long flags;
108
109 spin_lock_irqsave(&conf->device_lock, flags);
110 __release_stripe(conf, sh);
111 spin_unlock_irqrestore(&conf->device_lock, flags);
112}
113
fccddba0 114static inline void remove_hash(struct stripe_head *sh)
1da177e4
LT
115{
116 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
117
fccddba0 118 hlist_del_init(&sh->hash);
1da177e4
LT
119}
120
858119e1 121static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
1da177e4 122{
fccddba0 123 struct hlist_head *hp = stripe_hash(conf, sh->sector);
1da177e4
LT
124
125 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
126
127 CHECK_DEVLOCK();
fccddba0 128 hlist_add_head(&sh->hash, hp);
1da177e4
LT
129}
130
131
132/* find an idle stripe, make sure it is unhashed, and return it. */
133static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
134{
135 struct stripe_head *sh = NULL;
136 struct list_head *first;
137
138 CHECK_DEVLOCK();
139 if (list_empty(&conf->inactive_list))
140 goto out;
141 first = conf->inactive_list.next;
142 sh = list_entry(first, struct stripe_head, lru);
143 list_del_init(first);
144 remove_hash(sh);
145 atomic_inc(&conf->active_stripes);
146out:
147 return sh;
148}
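/*
 * Note (editor's addition): get_free_stripe() must be called with
 * conf->device_lock held (see CHECK_DEVLOCK).  The stripe it returns has
 * been unhashed, taken off the inactive list and counted in
 * conf->active_stripes, so the caller must either re-initialise it (as
 * get_active_stripe() does via init_stripe()) or free it again, as
 * drop_one_stripe() and resize_stripes() do.
 */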
149
150static void shrink_buffers(struct stripe_head *sh, int num)
151{
152 struct page *p;
153 int i;
154
155 for (i=0; i<num ; i++) {
156 p = sh->dev[i].page;
157 if (!p)
158 continue;
159 sh->dev[i].page = NULL;
2d1f3b5d 160 put_page(p);
1da177e4
LT
161 }
162}
163
164static int grow_buffers(struct stripe_head *sh, int num)
165{
166 int i;
167
168 for (i=0; i<num; i++) {
169 struct page *page;
170
171 if (!(page = alloc_page(GFP_KERNEL))) {
172 return 1;
173 }
174 sh->dev[i].page = page;
175 }
176 return 0;
177}
178
179static void raid5_build_block (struct stripe_head *sh, int i);
180
7ecaa1e6 181static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
1da177e4
LT
182{
183 raid5_conf_t *conf = sh->raid_conf;
7ecaa1e6 184 int i;
1da177e4
LT
185
186 if (atomic_read(&sh->count) != 0)
187 BUG();
188 if (test_bit(STRIPE_HANDLE, &sh->state))
189 BUG();
190
191 CHECK_DEVLOCK();
192 PRINTK("init_stripe called, stripe %llu\n",
193 (unsigned long long)sh->sector);
194
195 remove_hash(sh);
196
197 sh->sector = sector;
198 sh->pd_idx = pd_idx;
199 sh->state = 0;
200
7ecaa1e6
N
201 sh->disks = disks;
202
203 for (i = sh->disks; i--; ) {
1da177e4
LT
204 struct r5dev *dev = &sh->dev[i];
205
206 if (dev->toread || dev->towrite || dev->written ||
207 test_bit(R5_LOCKED, &dev->flags)) {
208 printk("sector=%llx i=%d %p %p %p %d\n",
209 (unsigned long long)sh->sector, i, dev->toread,
210 dev->towrite, dev->written,
211 test_bit(R5_LOCKED, &dev->flags));
212 BUG();
213 }
214 dev->flags = 0;
215 raid5_build_block(sh, i);
216 }
217 insert_hash(conf, sh);
218}
219
7ecaa1e6 220static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
1da177e4
LT
221{
222 struct stripe_head *sh;
fccddba0 223 struct hlist_node *hn;
1da177e4
LT
224
225 CHECK_DEVLOCK();
226 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
fccddba0 227 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
7ecaa1e6 228 if (sh->sector == sector && sh->disks == disks)
1da177e4
LT
229 return sh;
230 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
231 return NULL;
232}
233
234static void unplug_slaves(mddev_t *mddev);
235static void raid5_unplug_device(request_queue_t *q);
236
7ecaa1e6
N
237static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
238 int pd_idx, int noblock)
1da177e4
LT
239{
240 struct stripe_head *sh;
241
242 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
243
244 spin_lock_irq(&conf->device_lock);
245
246 do {
72626685
N
247 wait_event_lock_irq(conf->wait_for_stripe,
248 conf->quiesce == 0,
249 conf->device_lock, /* nothing */);
7ecaa1e6 250 sh = __find_stripe(conf, sector, disks);
1da177e4
LT
251 if (!sh) {
252 if (!conf->inactive_blocked)
253 sh = get_free_stripe(conf);
254 if (noblock && sh == NULL)
255 break;
256 if (!sh) {
257 conf->inactive_blocked = 1;
258 wait_event_lock_irq(conf->wait_for_stripe,
259 !list_empty(&conf->inactive_list) &&
5036805b
N
260 (atomic_read(&conf->active_stripes)
261 < (conf->max_nr_stripes *3/4)
1da177e4
LT
262 || !conf->inactive_blocked),
263 conf->device_lock,
264 unplug_slaves(conf->mddev);
265 );
266 conf->inactive_blocked = 0;
267 } else
7ecaa1e6 268 init_stripe(sh, sector, pd_idx, disks);
1da177e4
LT
269 } else {
270 if (atomic_read(&sh->count)) {
271 if (!list_empty(&sh->lru))
272 BUG();
273 } else {
274 if (!test_bit(STRIPE_HANDLE, &sh->state))
275 atomic_inc(&conf->active_stripes);
ccfcc3c1
N
276 if (!list_empty(&sh->lru))
277 list_del_init(&sh->lru);
1da177e4
LT
278 }
279 }
280 } while (sh == NULL);
281
282 if (sh)
283 atomic_inc(&sh->count);
284
285 spin_unlock_irq(&conf->device_lock);
286 return sh;
287}
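/*
 * Note (editor's addition): a stripe returned by get_active_stripe() has
 * had sh->count incremented, so every successful call must eventually be
 * balanced by release_stripe().  With 'noblock' set the function may
 * return NULL rather than sleep waiting for a free stripe.
 */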
288
3f294f4f 289static int grow_one_stripe(raid5_conf_t *conf)
1da177e4
LT
290{
291 struct stripe_head *sh;
3f294f4f
N
292 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
293 if (!sh)
294 return 0;
295 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
296 sh->raid_conf = conf;
297 spin_lock_init(&sh->lock);
298
299 if (grow_buffers(sh, conf->raid_disks)) {
300 shrink_buffers(sh, conf->raid_disks);
301 kmem_cache_free(conf->slab_cache, sh);
302 return 0;
303 }
7ecaa1e6 304 sh->disks = conf->raid_disks;
3f294f4f
N
305 /* we just created an active stripe so... */
306 atomic_set(&sh->count, 1);
307 atomic_inc(&conf->active_stripes);
308 INIT_LIST_HEAD(&sh->lru);
309 release_stripe(sh);
310 return 1;
311}
312
313static int grow_stripes(raid5_conf_t *conf, int num)
314{
1da177e4
LT
315 kmem_cache_t *sc;
316 int devs = conf->raid_disks;
317
ad01c9e3
N
318 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
319 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
320 conf->active_name = 0;
321 sc = kmem_cache_create(conf->cache_name[conf->active_name],
1da177e4
LT
322 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
323 0, 0, NULL, NULL);
324 if (!sc)
325 return 1;
326 conf->slab_cache = sc;
ad01c9e3 327 conf->pool_size = devs;
1da177e4 328 while (num--) {
3f294f4f 329 if (!grow_one_stripe(conf))
1da177e4 330 return 1;
1da177e4
LT
331 }
332 return 0;
333}
ad01c9e3
N
334static int resize_stripes(raid5_conf_t *conf, int newsize)
335{
336 /* Make all the stripes able to hold 'newsize' devices.
337 * New slots in each stripe get 'page' set to a new page.
338 *
339 * This happens in stages:
340 * 1/ create a new kmem_cache and allocate the required number of
341 * stripe_heads.
 342 * 2/ gather all the old stripe_heads and transfer the pages across
343 * to the new stripe_heads. This will have the side effect of
344 * freezing the array as once all stripe_heads have been collected,
345 * no IO will be possible. Old stripe heads are freed once their
346 * pages have been transferred over, and the old kmem_cache is
347 * freed when all stripes are done.
 348 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
 349 * we simply return a failure status - no need to clean anything up.
 350 * 4/ allocate new pages for the new slots in the new stripe_heads.
 351 * If this fails, we don't bother trying to shrink the
352 * stripe_heads down again, we just leave them as they are.
353 * As each stripe_head is processed the new one is released into
354 * active service.
355 *
356 * Once step2 is started, we cannot afford to wait for a write,
357 * so we use GFP_NOIO allocations.
358 */
359 struct stripe_head *osh, *nsh;
360 LIST_HEAD(newstripes);
361 struct disk_info *ndisks;
362 int err = 0;
363 kmem_cache_t *sc;
364 int i;
365
366 if (newsize <= conf->pool_size)
367 return 0; /* never bother to shrink */
368
369 /* Step 1 */
370 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
371 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
372 0, 0, NULL, NULL);
373 if (!sc)
374 return -ENOMEM;
375
376 for (i = conf->max_nr_stripes; i; i--) {
377 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
378 if (!nsh)
379 break;
380
381 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
382
383 nsh->raid_conf = conf;
384 spin_lock_init(&nsh->lock);
385
386 list_add(&nsh->lru, &newstripes);
387 }
388 if (i) {
389 /* didn't get enough, give up */
390 while (!list_empty(&newstripes)) {
391 nsh = list_entry(newstripes.next, struct stripe_head, lru);
392 list_del(&nsh->lru);
393 kmem_cache_free(sc, nsh);
394 }
395 kmem_cache_destroy(sc);
396 return -ENOMEM;
397 }
398 /* Step 2 - Must use GFP_NOIO now.
399 * OK, we have enough stripes, start collecting inactive
400 * stripes and copying them over
401 */
402 list_for_each_entry(nsh, &newstripes, lru) {
403 spin_lock_irq(&conf->device_lock);
404 wait_event_lock_irq(conf->wait_for_stripe,
405 !list_empty(&conf->inactive_list),
406 conf->device_lock,
407 unplug_slaves(conf->mddev);
408 );
409 osh = get_free_stripe(conf);
410 spin_unlock_irq(&conf->device_lock);
411 atomic_set(&nsh->count, 1);
412 for(i=0; i<conf->pool_size; i++)
413 nsh->dev[i].page = osh->dev[i].page;
414 for( ; i<newsize; i++)
415 nsh->dev[i].page = NULL;
416 kmem_cache_free(conf->slab_cache, osh);
417 }
418 kmem_cache_destroy(conf->slab_cache);
419
420 /* Step 3.
421 * At this point, we are holding all the stripes so the array
422 * is completely stalled, so now is a good time to resize
423 * conf->disks.
424 */
425 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
426 if (ndisks) {
427 for (i=0; i<conf->raid_disks; i++)
428 ndisks[i] = conf->disks[i];
429 kfree(conf->disks);
430 conf->disks = ndisks;
431 } else
432 err = -ENOMEM;
433
434 /* Step 4, return new stripes to service */
435 while(!list_empty(&newstripes)) {
436 nsh = list_entry(newstripes.next, struct stripe_head, lru);
437 list_del_init(&nsh->lru);
438 for (i=conf->raid_disks; i < newsize; i++)
439 if (nsh->dev[i].page == NULL) {
440 struct page *p = alloc_page(GFP_NOIO);
441 nsh->dev[i].page = p;
442 if (!p)
443 err = -ENOMEM;
444 }
445 release_stripe(nsh);
446 }
 447 /* critical section passed, GFP_NOIO no longer needed */
448
449 conf->slab_cache = sc;
450 conf->active_name = 1-conf->active_name;
451 conf->pool_size = newsize;
452 return err;
453}
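/*
 * Illustrative usage (editor's assumption - the caller is outside this
 * excerpt): the reshape setup path is expected to do something like
 *
 *	err = resize_stripes(conf, new_raid_disks);
 *
 * (new_raid_disks being the grown disk count, a hypothetical name here)
 * before the added devices are put into service, so that every
 * stripe_head has an r5dev slot and a page for each disk of the larger
 * array.
 */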
454
1da177e4 455
3f294f4f 456static int drop_one_stripe(raid5_conf_t *conf)
1da177e4
LT
457{
458 struct stripe_head *sh;
459
3f294f4f
N
460 spin_lock_irq(&conf->device_lock);
461 sh = get_free_stripe(conf);
462 spin_unlock_irq(&conf->device_lock);
463 if (!sh)
464 return 0;
465 if (atomic_read(&sh->count))
466 BUG();
ad01c9e3 467 shrink_buffers(sh, conf->pool_size);
3f294f4f
N
468 kmem_cache_free(conf->slab_cache, sh);
469 atomic_dec(&conf->active_stripes);
470 return 1;
471}
472
473static void shrink_stripes(raid5_conf_t *conf)
474{
475 while (drop_one_stripe(conf))
476 ;
477
29fc7e3e
N
478 if (conf->slab_cache)
479 kmem_cache_destroy(conf->slab_cache);
1da177e4
LT
480 conf->slab_cache = NULL;
481}
482
4e5314b5 483static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1da177e4
LT
484 int error)
485{
486 struct stripe_head *sh = bi->bi_private;
487 raid5_conf_t *conf = sh->raid_conf;
7ecaa1e6 488 int disks = sh->disks, i;
1da177e4
LT
489 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
490
491 if (bi->bi_size)
492 return 1;
493
494 for (i=0 ; i<disks; i++)
495 if (bi == &sh->dev[i].req)
496 break;
497
498 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
499 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
500 uptodate);
501 if (i == disks) {
502 BUG();
503 return 0;
504 }
505
506 if (uptodate) {
507#if 0
508 struct bio *bio;
509 unsigned long flags;
510 spin_lock_irqsave(&conf->device_lock, flags);
511 /* we can return a buffer if we bypassed the cache or
512 * if the top buffer is not in highmem. If there are
513 * multiple buffers, leave the extra work to
514 * handle_stripe
515 */
516 buffer = sh->bh_read[i];
517 if (buffer &&
518 (!PageHighMem(buffer->b_page)
519 || buffer->b_page == bh->b_page )
520 ) {
521 sh->bh_read[i] = buffer->b_reqnext;
522 buffer->b_reqnext = NULL;
523 } else
524 buffer = NULL;
525 spin_unlock_irqrestore(&conf->device_lock, flags);
526 if (sh->bh_page[i]==bh->b_page)
527 set_buffer_uptodate(bh);
528 if (buffer) {
529 if (buffer->b_page != bh->b_page)
530 memcpy(buffer->b_data, bh->b_data, bh->b_size);
531 buffer->b_end_io(buffer, 1);
532 }
533#else
534 set_bit(R5_UPTODATE, &sh->dev[i].flags);
4e5314b5
N
535#endif
536 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
14f8d26b 537 printk(KERN_INFO "raid5: read error corrected!!\n");
4e5314b5
N
538 clear_bit(R5_ReadError, &sh->dev[i].flags);
539 clear_bit(R5_ReWrite, &sh->dev[i].flags);
540 }
ba22dcbf
N
541 if (atomic_read(&conf->disks[i].rdev->read_errors))
542 atomic_set(&conf->disks[i].rdev->read_errors, 0);
1da177e4 543 } else {
ba22dcbf 544 int retry = 0;
1da177e4 545 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
ba22dcbf
N
546 atomic_inc(&conf->disks[i].rdev->read_errors);
547 if (conf->mddev->degraded)
14f8d26b 548 printk(KERN_WARNING "raid5: read error not correctable.\n");
ba22dcbf 549 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
4e5314b5 550 /* Oh, no!!! */
14f8d26b 551 printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
ba22dcbf
N
552 else if (atomic_read(&conf->disks[i].rdev->read_errors)
553 > conf->max_nr_stripes)
14f8d26b
N
554 printk(KERN_WARNING
555 "raid5: Too many read errors, failing device.\n");
ba22dcbf
N
556 else
557 retry = 1;
558 if (retry)
559 set_bit(R5_ReadError, &sh->dev[i].flags);
560 else {
4e5314b5
N
561 clear_bit(R5_ReadError, &sh->dev[i].flags);
562 clear_bit(R5_ReWrite, &sh->dev[i].flags);
563 md_error(conf->mddev, conf->disks[i].rdev);
ba22dcbf 564 }
1da177e4
LT
565 }
566 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
567#if 0
568 /* must restore b_page before unlocking buffer... */
569 if (sh->bh_page[i] != bh->b_page) {
570 bh->b_page = sh->bh_page[i];
571 bh->b_data = page_address(bh->b_page);
572 clear_buffer_uptodate(bh);
573 }
574#endif
575 clear_bit(R5_LOCKED, &sh->dev[i].flags);
576 set_bit(STRIPE_HANDLE, &sh->state);
577 release_stripe(sh);
578 return 0;
579}
580
581static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
582 int error)
583{
584 struct stripe_head *sh = bi->bi_private;
585 raid5_conf_t *conf = sh->raid_conf;
7ecaa1e6 586 int disks = sh->disks, i;
1da177e4
LT
587 unsigned long flags;
588 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
589
590 if (bi->bi_size)
591 return 1;
592
593 for (i=0 ; i<disks; i++)
594 if (bi == &sh->dev[i].req)
595 break;
596
597 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
598 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
599 uptodate);
600 if (i == disks) {
601 BUG();
602 return 0;
603 }
604
605 spin_lock_irqsave(&conf->device_lock, flags);
606 if (!uptodate)
607 md_error(conf->mddev, conf->disks[i].rdev);
608
609 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
610
611 clear_bit(R5_LOCKED, &sh->dev[i].flags);
612 set_bit(STRIPE_HANDLE, &sh->state);
613 __release_stripe(conf, sh);
614 spin_unlock_irqrestore(&conf->device_lock, flags);
615 return 0;
616}
617
618
619static sector_t compute_blocknr(struct stripe_head *sh, int i);
620
621static void raid5_build_block (struct stripe_head *sh, int i)
622{
623 struct r5dev *dev = &sh->dev[i];
624
625 bio_init(&dev->req);
626 dev->req.bi_io_vec = &dev->vec;
627 dev->req.bi_vcnt++;
628 dev->req.bi_max_vecs++;
629 dev->vec.bv_page = dev->page;
630 dev->vec.bv_len = STRIPE_SIZE;
631 dev->vec.bv_offset = 0;
632
633 dev->req.bi_sector = sh->sector;
634 dev->req.bi_private = sh;
635
636 dev->flags = 0;
637 if (i != sh->pd_idx)
638 dev->sector = compute_blocknr(sh, i);
639}
640
641static void error(mddev_t *mddev, mdk_rdev_t *rdev)
642{
643 char b[BDEVNAME_SIZE];
644 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
645 PRINTK("raid5: error called\n");
646
b2d444d7 647 if (!test_bit(Faulty, &rdev->flags)) {
1da177e4 648 mddev->sb_dirty = 1;
b2d444d7 649 if (test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
650 conf->working_disks--;
651 mddev->degraded++;
652 conf->failed_disks++;
b2d444d7 653 clear_bit(In_sync, &rdev->flags);
1da177e4
LT
654 /*
655 * if recovery was running, make sure it aborts.
656 */
657 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
658 }
b2d444d7 659 set_bit(Faulty, &rdev->flags);
1da177e4
LT
660 printk (KERN_ALERT
661 "raid5: Disk failure on %s, disabling device."
662 " Operation continuing on %d devices\n",
663 bdevname(rdev->bdev,b), conf->working_disks);
664 }
665}
666
667/*
668 * Input: a 'big' sector number,
669 * Output: index of the data and parity disk, and the sector # in them.
670 */
671static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
672 unsigned int data_disks, unsigned int * dd_idx,
673 unsigned int * pd_idx, raid5_conf_t *conf)
674{
675 long stripe;
676 unsigned long chunk_number;
677 unsigned int chunk_offset;
678 sector_t new_sector;
679 int sectors_per_chunk = conf->chunk_size >> 9;
680
681 /* First compute the information on this sector */
682
683 /*
684 * Compute the chunk number and the sector offset inside the chunk
685 */
686 chunk_offset = sector_div(r_sector, sectors_per_chunk);
687 chunk_number = r_sector;
688 BUG_ON(r_sector != chunk_number);
689
690 /*
691 * Compute the stripe number
692 */
693 stripe = chunk_number / data_disks;
694
695 /*
696 * Compute the data disk and parity disk indexes inside the stripe
697 */
698 *dd_idx = chunk_number % data_disks;
699
700 /*
701 * Select the parity disk based on the user selected algorithm.
702 */
703 if (conf->level == 4)
704 *pd_idx = data_disks;
705 else switch (conf->algorithm) {
706 case ALGORITHM_LEFT_ASYMMETRIC:
707 *pd_idx = data_disks - stripe % raid_disks;
708 if (*dd_idx >= *pd_idx)
709 (*dd_idx)++;
710 break;
711 case ALGORITHM_RIGHT_ASYMMETRIC:
712 *pd_idx = stripe % raid_disks;
713 if (*dd_idx >= *pd_idx)
714 (*dd_idx)++;
715 break;
716 case ALGORITHM_LEFT_SYMMETRIC:
717 *pd_idx = data_disks - stripe % raid_disks;
718 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
719 break;
720 case ALGORITHM_RIGHT_SYMMETRIC:
721 *pd_idx = stripe % raid_disks;
722 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
723 break;
724 default:
14f8d26b 725 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1da177e4
LT
726 conf->algorithm);
727 }
728
729 /*
730 * Finally, compute the new sector number
731 */
732 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
733 return new_sector;
734}
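/*
 * Worked example (editor's addition): assume 4 disks, 64KiB chunks
 * (sectors_per_chunk = 128) and ALGORITHM_LEFT_SYMMETRIC.  For
 * r_sector = 1000: chunk_offset = 104, chunk_number = 7, stripe = 2,
 * so *pd_idx = 3 - (2 % 4) = 1, *dd_idx = (1 + 1 + (7 % 3)) % 4 = 3,
 * and the returned per-device sector is 2 * 128 + 104 = 360.
 */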
735
736
737static sector_t compute_blocknr(struct stripe_head *sh, int i)
738{
739 raid5_conf_t *conf = sh->raid_conf;
7ecaa1e6 740 int raid_disks = sh->disks, data_disks = raid_disks - 1;
1da177e4
LT
741 sector_t new_sector = sh->sector, check;
742 int sectors_per_chunk = conf->chunk_size >> 9;
743 sector_t stripe;
744 int chunk_offset;
745 int chunk_number, dummy1, dummy2, dd_idx = i;
746 sector_t r_sector;
747
748 chunk_offset = sector_div(new_sector, sectors_per_chunk);
749 stripe = new_sector;
750 BUG_ON(new_sector != stripe);
751
752
753 switch (conf->algorithm) {
754 case ALGORITHM_LEFT_ASYMMETRIC:
755 case ALGORITHM_RIGHT_ASYMMETRIC:
756 if (i > sh->pd_idx)
757 i--;
758 break;
759 case ALGORITHM_LEFT_SYMMETRIC:
760 case ALGORITHM_RIGHT_SYMMETRIC:
761 if (i < sh->pd_idx)
762 i += raid_disks;
763 i -= (sh->pd_idx + 1);
764 break;
765 default:
14f8d26b 766 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
1da177e4
LT
767 conf->algorithm);
768 }
769
770 chunk_number = stripe * data_disks + i;
771 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
772
773 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
774 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
14f8d26b 775 printk(KERN_ERR "compute_blocknr: map not correct\n");
1da177e4
LT
776 return 0;
777 }
778 return r_sector;
779}
780
781
782
783/*
784 * Copy data between a page in the stripe cache, and a bio.
785 * There are no alignment or size guarantees between the page or the
786 * bio except that there is some overlap.
787 * All iovecs in the bio must be considered.
788 */
789static void copy_data(int frombio, struct bio *bio,
790 struct page *page,
791 sector_t sector)
792{
793 char *pa = page_address(page);
794 struct bio_vec *bvl;
795 int i;
796 int page_offset;
797
798 if (bio->bi_sector >= sector)
799 page_offset = (signed)(bio->bi_sector - sector) * 512;
800 else
801 page_offset = (signed)(sector - bio->bi_sector) * -512;
802 bio_for_each_segment(bvl, bio, i) {
803 int len = bio_iovec_idx(bio,i)->bv_len;
804 int clen;
805 int b_offset = 0;
806
807 if (page_offset < 0) {
808 b_offset = -page_offset;
809 page_offset += b_offset;
810 len -= b_offset;
811 }
812
813 if (len > 0 && page_offset + len > STRIPE_SIZE)
814 clen = STRIPE_SIZE - page_offset;
815 else clen = len;
816
817 if (clen > 0) {
818 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
819 if (frombio)
820 memcpy(pa+page_offset, ba+b_offset, clen);
821 else
822 memcpy(ba+b_offset, pa+page_offset, clen);
823 __bio_kunmap_atomic(ba, KM_USER0);
824 }
825 if (clen < len) /* hit end of page */
826 break;
827 page_offset += len;
828 }
829}
830
831#define check_xor() do { \
832 if (count == MAX_XOR_BLOCKS) { \
833 xor_block(count, STRIPE_SIZE, ptr); \
834 count = 1; \
835 } \
836 } while(0)
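/*
 * Note (editor's addition): check_xor() flushes the gathered source pages
 * through xor_block() once MAX_XOR_BLOCKS pointers are queued, then resets
 * count to 1 so that ptr[0] keeps accumulating the running XOR result for
 * compute_block() and compute_parity().
 */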
837
838
839static void compute_block(struct stripe_head *sh, int dd_idx)
840{
7ecaa1e6 841 int i, count, disks = sh->disks;
1da177e4
LT
842 void *ptr[MAX_XOR_BLOCKS], *p;
843
844 PRINTK("compute_block, stripe %llu, idx %d\n",
845 (unsigned long long)sh->sector, dd_idx);
846
847 ptr[0] = page_address(sh->dev[dd_idx].page);
848 memset(ptr[0], 0, STRIPE_SIZE);
849 count = 1;
850 for (i = disks ; i--; ) {
851 if (i == dd_idx)
852 continue;
853 p = page_address(sh->dev[i].page);
854 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
855 ptr[count++] = p;
856 else
14f8d26b 857 printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
1da177e4
LT
858 " not present\n", dd_idx,
859 (unsigned long long)sh->sector, i);
860
861 check_xor();
862 }
863 if (count != 1)
864 xor_block(count, STRIPE_SIZE, ptr);
865 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
866}
867
868static void compute_parity(struct stripe_head *sh, int method)
869{
870 raid5_conf_t *conf = sh->raid_conf;
7ecaa1e6 871 int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
1da177e4
LT
872 void *ptr[MAX_XOR_BLOCKS];
873 struct bio *chosen;
874
875 PRINTK("compute_parity, stripe %llu, method %d\n",
876 (unsigned long long)sh->sector, method);
877
878 count = 1;
879 ptr[0] = page_address(sh->dev[pd_idx].page);
880 switch(method) {
881 case READ_MODIFY_WRITE:
882 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
883 BUG();
884 for (i=disks ; i-- ;) {
885 if (i==pd_idx)
886 continue;
887 if (sh->dev[i].towrite &&
888 test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
889 ptr[count++] = page_address(sh->dev[i].page);
890 chosen = sh->dev[i].towrite;
891 sh->dev[i].towrite = NULL;
892
893 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
894 wake_up(&conf->wait_for_overlap);
895
896 if (sh->dev[i].written) BUG();
897 sh->dev[i].written = chosen;
898 check_xor();
899 }
900 }
901 break;
902 case RECONSTRUCT_WRITE:
903 memset(ptr[0], 0, STRIPE_SIZE);
904 for (i= disks; i-- ;)
905 if (i!=pd_idx && sh->dev[i].towrite) {
906 chosen = sh->dev[i].towrite;
907 sh->dev[i].towrite = NULL;
908
909 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
910 wake_up(&conf->wait_for_overlap);
911
912 if (sh->dev[i].written) BUG();
913 sh->dev[i].written = chosen;
914 }
915 break;
916 case CHECK_PARITY:
917 break;
918 }
919 if (count>1) {
920 xor_block(count, STRIPE_SIZE, ptr);
921 count = 1;
922 }
923
924 for (i = disks; i--;)
925 if (sh->dev[i].written) {
926 sector_t sector = sh->dev[i].sector;
927 struct bio *wbi = sh->dev[i].written;
928 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
929 copy_data(1, wbi, sh->dev[i].page, sector);
930 wbi = r5_next_bio(wbi, sector);
931 }
932
933 set_bit(R5_LOCKED, &sh->dev[i].flags);
934 set_bit(R5_UPTODATE, &sh->dev[i].flags);
935 }
936
937 switch(method) {
938 case RECONSTRUCT_WRITE:
939 case CHECK_PARITY:
940 for (i=disks; i--;)
941 if (i != pd_idx) {
942 ptr[count++] = page_address(sh->dev[i].page);
943 check_xor();
944 }
945 break;
946 case READ_MODIFY_WRITE:
947 for (i = disks; i--;)
948 if (sh->dev[i].written) {
949 ptr[count++] = page_address(sh->dev[i].page);
950 check_xor();
951 }
952 }
953 if (count != 1)
954 xor_block(count, STRIPE_SIZE, ptr);
955
956 if (method != CHECK_PARITY) {
957 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
958 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
959 } else
960 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
961}
962
963/*
964 * Each stripe/dev can have one or more bion attached.
965 * toread/towrite point to the first in a chain.
966 * The bi_next chain must be in order.
967 */
968static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
969{
970 struct bio **bip;
971 raid5_conf_t *conf = sh->raid_conf;
72626685 972 int firstwrite=0;
1da177e4
LT
973
974 PRINTK("adding bh b#%llu to stripe s#%llu\n",
975 (unsigned long long)bi->bi_sector,
976 (unsigned long long)sh->sector);
977
978
979 spin_lock(&sh->lock);
980 spin_lock_irq(&conf->device_lock);
72626685 981 if (forwrite) {
1da177e4 982 bip = &sh->dev[dd_idx].towrite;
72626685
N
983 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
984 firstwrite = 1;
985 } else
1da177e4
LT
986 bip = &sh->dev[dd_idx].toread;
987 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
988 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
989 goto overlap;
990 bip = & (*bip)->bi_next;
991 }
992 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
993 goto overlap;
994
995 if (*bip && bi->bi_next && (*bip) != bi->bi_next)
996 BUG();
997 if (*bip)
998 bi->bi_next = *bip;
999 *bip = bi;
1000 bi->bi_phys_segments ++;
1001 spin_unlock_irq(&conf->device_lock);
1002 spin_unlock(&sh->lock);
1003
1004 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
1005 (unsigned long long)bi->bi_sector,
1006 (unsigned long long)sh->sector, dd_idx);
1007
72626685
N
1008 if (conf->mddev->bitmap && firstwrite) {
1009 sh->bm_seq = conf->seq_write;
1010 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1011 STRIPE_SECTORS, 0);
1012 set_bit(STRIPE_BIT_DELAY, &sh->state);
1013 }
1014
1da177e4
LT
1015 if (forwrite) {
1016 /* check if page is covered */
1017 sector_t sector = sh->dev[dd_idx].sector;
1018 for (bi=sh->dev[dd_idx].towrite;
1019 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
1020 bi && bi->bi_sector <= sector;
1021 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
1022 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
1023 sector = bi->bi_sector + (bi->bi_size>>9);
1024 }
1025 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
1026 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
1027 }
1028 return 1;
1029
1030 overlap:
1031 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1032 spin_unlock_irq(&conf->device_lock);
1033 spin_unlock(&sh->lock);
1034 return 0;
1035}
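/*
 * Note (editor's addition): add_stripe_bio() returns 1 once the bio is
 * linked into the sorted toread/towrite chain, and 0 if it overlaps a bio
 * already queued there; in that case R5_Overlap is set and the caller
 * (make_request) sleeps on conf->wait_for_overlap before retrying.
 */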
1036
ccfcc3c1
N
1037static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1038{
1039 int sectors_per_chunk = conf->chunk_size >> 9;
1040 sector_t x = stripe;
1041 int pd_idx, dd_idx;
1042 int chunk_offset = sector_div(x, sectors_per_chunk);
1043 stripe = x;
1044 raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
1045 + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
1046 return pd_idx;
1047}
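/*
 * Note (editor's addition): stripe_to_pdidx() re-derives which device
 * holds parity for the stripe containing 'stripe', assuming a layout of
 * 'disks' devices.  handle_stripe() and sync_request() use it during
 * expansion, when pd_idx has to be recomputed for the new disk count.
 */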
1048
1da177e4
LT
1049
1050/*
1051 * handle_stripe - do things to a stripe.
1052 *
1053 * We lock the stripe and then examine the state of various bits
1054 * to see what needs to be done.
1055 * Possible results:
 1056 * return some read requests which now have data
1057 * return some write requests which are safely on disc
1058 * schedule a read on some buffers
1059 * schedule a write of some buffers
1060 * return confirmation of parity correctness
1061 *
1062 * Parity calculations are done inside the stripe lock
1063 * buffers are taken off read_list or write_list, and bh_cache buffers
1064 * get BH_Lock set before the stripe lock is released.
1065 *
1066 */
1067
1068static void handle_stripe(struct stripe_head *sh)
1069{
1070 raid5_conf_t *conf = sh->raid_conf;
7ecaa1e6 1071 int disks = sh->disks;
1da177e4
LT
1072 struct bio *return_bi= NULL;
1073 struct bio *bi;
1074 int i;
ccfcc3c1 1075 int syncing, expanding, expanded;
1da177e4
LT
1076 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
1077 int non_overwrite = 0;
1078 int failed_num=0;
1079 struct r5dev *dev;
1080
1081 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
1082 (unsigned long long)sh->sector, atomic_read(&sh->count),
1083 sh->pd_idx);
1084
1085 spin_lock(&sh->lock);
1086 clear_bit(STRIPE_HANDLE, &sh->state);
1087 clear_bit(STRIPE_DELAYED, &sh->state);
1088
1089 syncing = test_bit(STRIPE_SYNCING, &sh->state);
ccfcc3c1
N
1090 expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1091 expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
1da177e4
LT
1092 /* Now to look around and see what can be done */
1093
9910f16a 1094 rcu_read_lock();
1da177e4
LT
1095 for (i=disks; i--; ) {
1096 mdk_rdev_t *rdev;
1097 dev = &sh->dev[i];
1098 clear_bit(R5_Insync, &dev->flags);
1da177e4
LT
1099
1100 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1101 i, dev->flags, dev->toread, dev->towrite, dev->written);
1102 /* maybe we can reply to a read */
1103 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
1104 struct bio *rbi, *rbi2;
1105 PRINTK("Return read for disc %d\n", i);
1106 spin_lock_irq(&conf->device_lock);
1107 rbi = dev->toread;
1108 dev->toread = NULL;
1109 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1110 wake_up(&conf->wait_for_overlap);
1111 spin_unlock_irq(&conf->device_lock);
1112 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1113 copy_data(0, rbi, dev->page, dev->sector);
1114 rbi2 = r5_next_bio(rbi, dev->sector);
1115 spin_lock_irq(&conf->device_lock);
1116 if (--rbi->bi_phys_segments == 0) {
1117 rbi->bi_next = return_bi;
1118 return_bi = rbi;
1119 }
1120 spin_unlock_irq(&conf->device_lock);
1121 rbi = rbi2;
1122 }
1123 }
1124
1125 /* now count some things */
1126 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
1127 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
1128
1129
1130 if (dev->toread) to_read++;
1131 if (dev->towrite) {
1132 to_write++;
1133 if (!test_bit(R5_OVERWRITE, &dev->flags))
1134 non_overwrite++;
1135 }
1136 if (dev->written) written++;
9910f16a 1137 rdev = rcu_dereference(conf->disks[i].rdev);
b2d444d7 1138 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
14f8d26b 1139 /* The ReadError flag will just be confusing now */
4e5314b5
N
1140 clear_bit(R5_ReadError, &dev->flags);
1141 clear_bit(R5_ReWrite, &dev->flags);
1142 }
b2d444d7 1143 if (!rdev || !test_bit(In_sync, &rdev->flags)
4e5314b5 1144 || test_bit(R5_ReadError, &dev->flags)) {
1da177e4
LT
1145 failed++;
1146 failed_num = i;
1147 } else
1148 set_bit(R5_Insync, &dev->flags);
1149 }
9910f16a 1150 rcu_read_unlock();
1da177e4
LT
1151 PRINTK("locked=%d uptodate=%d to_read=%d"
1152 " to_write=%d failed=%d failed_num=%d\n",
1153 locked, uptodate, to_read, to_write, failed, failed_num);
1154 /* check if the array has lost two devices and, if so, some requests might
1155 * need to be failed
1156 */
1157 if (failed > 1 && to_read+to_write+written) {
1da177e4 1158 for (i=disks; i--; ) {
72626685 1159 int bitmap_end = 0;
4e5314b5
N
1160
1161 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
9910f16a
N
1162 mdk_rdev_t *rdev;
1163 rcu_read_lock();
1164 rdev = rcu_dereference(conf->disks[i].rdev);
b2d444d7 1165 if (rdev && test_bit(In_sync, &rdev->flags))
4e5314b5
N
1166 /* multiple read failures in one stripe */
1167 md_error(conf->mddev, rdev);
9910f16a 1168 rcu_read_unlock();
4e5314b5
N
1169 }
1170
72626685 1171 spin_lock_irq(&conf->device_lock);
1da177e4
LT
1172 /* fail all writes first */
1173 bi = sh->dev[i].towrite;
1174 sh->dev[i].towrite = NULL;
72626685 1175 if (bi) { to_write--; bitmap_end = 1; }
1da177e4
LT
1176
1177 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1178 wake_up(&conf->wait_for_overlap);
1179
1180 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1181 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1182 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1183 if (--bi->bi_phys_segments == 0) {
1184 md_write_end(conf->mddev);
1185 bi->bi_next = return_bi;
1186 return_bi = bi;
1187 }
1188 bi = nextbi;
1189 }
1190 /* and fail all 'written' */
1191 bi = sh->dev[i].written;
1192 sh->dev[i].written = NULL;
72626685 1193 if (bi) bitmap_end = 1;
1da177e4
LT
1194 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1195 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1196 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1197 if (--bi->bi_phys_segments == 0) {
1198 md_write_end(conf->mddev);
1199 bi->bi_next = return_bi;
1200 return_bi = bi;
1201 }
1202 bi = bi2;
1203 }
1204
1205 /* fail any reads if this device is non-operational */
4e5314b5
N
1206 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1207 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1da177e4
LT
1208 bi = sh->dev[i].toread;
1209 sh->dev[i].toread = NULL;
1210 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1211 wake_up(&conf->wait_for_overlap);
1212 if (bi) to_read--;
1213 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1214 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1215 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1216 if (--bi->bi_phys_segments == 0) {
1217 bi->bi_next = return_bi;
1218 return_bi = bi;
1219 }
1220 bi = nextbi;
1221 }
1222 }
72626685
N
1223 spin_unlock_irq(&conf->device_lock);
1224 if (bitmap_end)
1225 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1226 STRIPE_SECTORS, 0, 0);
1da177e4 1227 }
1da177e4
LT
1228 }
1229 if (failed > 1 && syncing) {
1230 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1231 clear_bit(STRIPE_SYNCING, &sh->state);
1232 syncing = 0;
1233 }
1234
1235 /* might be able to return some write requests if the parity block
1236 * is safe, or on a failed drive
1237 */
1238 dev = &sh->dev[sh->pd_idx];
1239 if ( written &&
1240 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1241 test_bit(R5_UPTODATE, &dev->flags))
1242 || (failed == 1 && failed_num == sh->pd_idx))
1243 ) {
1244 /* any written block on an uptodate or failed drive can be returned.
1245 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1246 * never LOCKED, so we don't need to test 'failed' directly.
1247 */
1248 for (i=disks; i--; )
1249 if (sh->dev[i].written) {
1250 dev = &sh->dev[i];
1251 if (!test_bit(R5_LOCKED, &dev->flags) &&
1252 test_bit(R5_UPTODATE, &dev->flags) ) {
1253 /* We can return any write requests */
1254 struct bio *wbi, *wbi2;
72626685 1255 int bitmap_end = 0;
1da177e4
LT
1256 PRINTK("Return write for disc %d\n", i);
1257 spin_lock_irq(&conf->device_lock);
1258 wbi = dev->written;
1259 dev->written = NULL;
1260 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1261 wbi2 = r5_next_bio(wbi, dev->sector);
1262 if (--wbi->bi_phys_segments == 0) {
1263 md_write_end(conf->mddev);
1264 wbi->bi_next = return_bi;
1265 return_bi = wbi;
1266 }
1267 wbi = wbi2;
1268 }
72626685
N
1269 if (dev->towrite == NULL)
1270 bitmap_end = 1;
1da177e4 1271 spin_unlock_irq(&conf->device_lock);
72626685
N
1272 if (bitmap_end)
1273 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1274 STRIPE_SECTORS,
1275 !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1da177e4
LT
1276 }
1277 }
1278 }
1279
1280 /* Now we might consider reading some blocks, either to check/generate
1281 * parity, or to satisfy requests
1282 * or to load a block that is being partially written.
1283 */
ccfcc3c1 1284 if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
1da177e4
LT
1285 for (i=disks; i--;) {
1286 dev = &sh->dev[i];
1287 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1288 (dev->toread ||
1289 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1290 syncing ||
ccfcc3c1 1291 expanding ||
1da177e4
LT
1292 (failed && (sh->dev[failed_num].toread ||
1293 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1294 )
1295 ) {
1296 /* we would like to get this block, possibly
1297 * by computing it, but we might not be able to
1298 */
1299 if (uptodate == disks-1) {
1300 PRINTK("Computing block %d\n", i);
1301 compute_block(sh, i);
1302 uptodate++;
1303 } else if (test_bit(R5_Insync, &dev->flags)) {
1304 set_bit(R5_LOCKED, &dev->flags);
1305 set_bit(R5_Wantread, &dev->flags);
1306#if 0
1307 /* if I am just reading this block and we don't have
1308 a failed drive, or any pending writes then sidestep the cache */
1309 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1310 ! syncing && !failed && !to_write) {
1311 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1312 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1313 }
1314#endif
1315 locked++;
1316 PRINTK("Reading block %d (sync=%d)\n",
1317 i, syncing);
1da177e4
LT
1318 }
1319 }
1320 }
1321 set_bit(STRIPE_HANDLE, &sh->state);
1322 }
1323
1324 /* now to consider writing and what else, if anything should be read */
1325 if (to_write) {
1326 int rmw=0, rcw=0;
1327 for (i=disks ; i--;) {
1328 /* would I have to read this buffer for read_modify_write */
1329 dev = &sh->dev[i];
1330 if ((dev->towrite || i == sh->pd_idx) &&
1331 (!test_bit(R5_LOCKED, &dev->flags)
1332#if 0
1333|| sh->bh_page[i]!=bh->b_page
1334#endif
1335 ) &&
1336 !test_bit(R5_UPTODATE, &dev->flags)) {
1337 if (test_bit(R5_Insync, &dev->flags)
1338/* && !(!mddev->insync && i == sh->pd_idx) */
1339 )
1340 rmw++;
1341 else rmw += 2*disks; /* cannot read it */
1342 }
1343 /* Would I have to read this buffer for reconstruct_write */
1344 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1345 (!test_bit(R5_LOCKED, &dev->flags)
1346#if 0
1347|| sh->bh_page[i] != bh->b_page
1348#endif
1349 ) &&
1350 !test_bit(R5_UPTODATE, &dev->flags)) {
1351 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1352 else rcw += 2*disks;
1353 }
1354 }
1355 PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1356 (unsigned long long)sh->sector, rmw, rcw);
1357 set_bit(STRIPE_HANDLE, &sh->state);
1358 if (rmw < rcw && rmw > 0)
1359 /* prefer read-modify-write, but need to get some data */
1360 for (i=disks; i--;) {
1361 dev = &sh->dev[i];
1362 if ((dev->towrite || i == sh->pd_idx) &&
1363 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1364 test_bit(R5_Insync, &dev->flags)) {
1365 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1366 {
1367 PRINTK("Read_old block %d for r-m-w\n", i);
1368 set_bit(R5_LOCKED, &dev->flags);
1369 set_bit(R5_Wantread, &dev->flags);
1370 locked++;
1371 } else {
1372 set_bit(STRIPE_DELAYED, &sh->state);
1373 set_bit(STRIPE_HANDLE, &sh->state);
1374 }
1375 }
1376 }
1377 if (rcw <= rmw && rcw > 0)
1378 /* want reconstruct write, but need to get some data */
1379 for (i=disks; i--;) {
1380 dev = &sh->dev[i];
1381 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1382 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1383 test_bit(R5_Insync, &dev->flags)) {
1384 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1385 {
1386 PRINTK("Read_old block %d for Reconstruct\n", i);
1387 set_bit(R5_LOCKED, &dev->flags);
1388 set_bit(R5_Wantread, &dev->flags);
1389 locked++;
1390 } else {
1391 set_bit(STRIPE_DELAYED, &sh->state);
1392 set_bit(STRIPE_HANDLE, &sh->state);
1393 }
1394 }
1395 }
1396 /* now if nothing is locked, and if we have enough data, we can start a write request */
72626685
N
1397 if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1398 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1da177e4
LT
1399 PRINTK("Computing parity...\n");
1400 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1401 /* now every locked buffer is ready to be written */
1402 for (i=disks; i--;)
1403 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1404 PRINTK("Writing block %d\n", i);
1405 locked++;
1406 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1407 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1408 || (i==sh->pd_idx && failed == 0))
1409 set_bit(STRIPE_INSYNC, &sh->state);
1410 }
1411 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1412 atomic_dec(&conf->preread_active_stripes);
1413 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1414 md_wakeup_thread(conf->mddev->thread);
1415 }
1416 }
1417 }
1418
1419 /* maybe we need to check and possibly fix the parity for this stripe
1420 * Any reads will already have been scheduled, so we just see if enough data
1421 * is available
1422 */
1423 if (syncing && locked == 0 &&
14f8d26b 1424 !test_bit(STRIPE_INSYNC, &sh->state)) {
1da177e4
LT
1425 set_bit(STRIPE_HANDLE, &sh->state);
1426 if (failed == 0) {
1427 char *pagea;
1428 if (uptodate != disks)
1429 BUG();
1430 compute_parity(sh, CHECK_PARITY);
1431 uptodate--;
1432 pagea = page_address(sh->dev[sh->pd_idx].page);
1433 if ((*(u32*)pagea) == 0 &&
1434 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1435 /* parity is correct (on disc, not in buffer any more) */
1436 set_bit(STRIPE_INSYNC, &sh->state);
9d88883e
N
1437 } else {
1438 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1439 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1440 /* don't try to repair!! */
1441 set_bit(STRIPE_INSYNC, &sh->state);
14f8d26b
N
1442 else {
1443 compute_block(sh, sh->pd_idx);
1444 uptodate++;
1445 }
1da177e4
LT
1446 }
1447 }
1448 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
14f8d26b 1449 /* either failed parity check, or recovery is happening */
1da177e4
LT
1450 if (failed==0)
1451 failed_num = sh->pd_idx;
1da177e4 1452 dev = &sh->dev[failed_num];
14f8d26b
N
1453 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1454 BUG_ON(uptodate != disks);
1455
1da177e4
LT
1456 set_bit(R5_LOCKED, &dev->flags);
1457 set_bit(R5_Wantwrite, &dev->flags);
72626685 1458 clear_bit(STRIPE_DEGRADED, &sh->state);
1da177e4
LT
1459 locked++;
1460 set_bit(STRIPE_INSYNC, &sh->state);
1da177e4
LT
1461 }
1462 }
1463 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1464 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1465 clear_bit(STRIPE_SYNCING, &sh->state);
1466 }
4e5314b5
N
1467
1468 /* If the failed drive is just a ReadError, then we might need to progress
1469 * the repair/check process
1470 */
ba22dcbf
N
1471 if (failed == 1 && ! conf->mddev->ro &&
1472 test_bit(R5_ReadError, &sh->dev[failed_num].flags)
4e5314b5
N
1473 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1474 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1475 ) {
1476 dev = &sh->dev[failed_num];
1477 if (!test_bit(R5_ReWrite, &dev->flags)) {
1478 set_bit(R5_Wantwrite, &dev->flags);
1479 set_bit(R5_ReWrite, &dev->flags);
1480 set_bit(R5_LOCKED, &dev->flags);
ccfcc3c1 1481 locked++;
4e5314b5
N
1482 } else {
1483 /* let's read it back */
1484 set_bit(R5_Wantread, &dev->flags);
1485 set_bit(R5_LOCKED, &dev->flags);
ccfcc3c1 1486 locked++;
4e5314b5
N
1487 }
1488 }
1489
ccfcc3c1
N
1490 if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
1491 /* Need to write out all blocks after computing parity */
1492 sh->disks = conf->raid_disks;
1493 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
1494 compute_parity(sh, RECONSTRUCT_WRITE);
1495 for (i= conf->raid_disks; i--;) {
1496 set_bit(R5_LOCKED, &sh->dev[i].flags);
1497 locked++;
1498 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1499 }
1500 clear_bit(STRIPE_EXPANDING, &sh->state);
1501 } else if (expanded) {
1502 clear_bit(STRIPE_EXPAND_READY, &sh->state);
1503 wake_up(&conf->wait_for_overlap);
1504 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
1505 }
1506
1507 if (expanding && locked == 0) {
1508 /* We have read all the blocks in this stripe and now we need to
1509 * copy some of them into a target stripe for expand.
1510 */
1511 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1512 for (i=0; i< sh->disks; i++)
1513 if (i != sh->pd_idx) {
1514 int dd_idx, pd_idx, j;
1515 struct stripe_head *sh2;
1516
1517 sector_t bn = compute_blocknr(sh, i);
1518 sector_t s = raid5_compute_sector(bn, conf->raid_disks,
1519 conf->raid_disks-1,
1520 &dd_idx, &pd_idx, conf);
1521 sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
1522 if (sh2 == NULL)
1523 /* so far only the early blocks of this stripe
1524 * have been requested. When later blocks
1525 * get requested, we will try again
1526 */
1527 continue;
1528 if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
1529 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
1530 /* must have already done this block */
1531 release_stripe(sh2);
1532 continue;
1533 }
1534 memcpy(page_address(sh2->dev[dd_idx].page),
1535 page_address(sh->dev[i].page),
1536 STRIPE_SIZE);
1537 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
1538 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
1539 for (j=0; j<conf->raid_disks; j++)
1540 if (j != sh2->pd_idx &&
1541 !test_bit(R5_Expanded, &sh2->dev[j].flags))
1542 break;
1543 if (j == conf->raid_disks) {
1544 set_bit(STRIPE_EXPAND_READY, &sh2->state);
1545 set_bit(STRIPE_HANDLE, &sh2->state);
1546 }
1547 release_stripe(sh2);
1548 }
1549 }
1550
1da177e4
LT
1551 spin_unlock(&sh->lock);
1552
1553 while ((bi=return_bi)) {
1554 int bytes = bi->bi_size;
1555
1556 return_bi = bi->bi_next;
1557 bi->bi_next = NULL;
1558 bi->bi_size = 0;
1559 bi->bi_end_io(bi, bytes, 0);
1560 }
1561 for (i=disks; i-- ;) {
1562 int rw;
1563 struct bio *bi;
1564 mdk_rdev_t *rdev;
1565 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1566 rw = 1;
1567 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1568 rw = 0;
1569 else
1570 continue;
1571
1572 bi = &sh->dev[i].req;
1573
1574 bi->bi_rw = rw;
1575 if (rw)
1576 bi->bi_end_io = raid5_end_write_request;
1577 else
1578 bi->bi_end_io = raid5_end_read_request;
1579
1580 rcu_read_lock();
d6065f7b 1581 rdev = rcu_dereference(conf->disks[i].rdev);
b2d444d7 1582 if (rdev && test_bit(Faulty, &rdev->flags))
1da177e4
LT
1583 rdev = NULL;
1584 if (rdev)
1585 atomic_inc(&rdev->nr_pending);
1586 rcu_read_unlock();
1587
1588 if (rdev) {
ccfcc3c1 1589 if (syncing || expanding || expanded)
1da177e4
LT
1590 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1591
1592 bi->bi_bdev = rdev->bdev;
1593 PRINTK("for %llu schedule op %ld on disc %d\n",
1594 (unsigned long long)sh->sector, bi->bi_rw, i);
1595 atomic_inc(&sh->count);
1596 bi->bi_sector = sh->sector + rdev->data_offset;
1597 bi->bi_flags = 1 << BIO_UPTODATE;
1598 bi->bi_vcnt = 1;
1599 bi->bi_max_vecs = 1;
1600 bi->bi_idx = 0;
1601 bi->bi_io_vec = &sh->dev[i].vec;
1602 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1603 bi->bi_io_vec[0].bv_offset = 0;
1604 bi->bi_size = STRIPE_SIZE;
1605 bi->bi_next = NULL;
4dbcdc75
N
1606 if (rw == WRITE &&
1607 test_bit(R5_ReWrite, &sh->dev[i].flags))
1608 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1da177e4
LT
1609 generic_make_request(bi);
1610 } else {
72626685
N
1611 if (rw == 1)
1612 set_bit(STRIPE_DEGRADED, &sh->state);
1da177e4
LT
1613 PRINTK("skip op %ld on disc %d for sector %llu\n",
1614 bi->bi_rw, i, (unsigned long long)sh->sector);
1615 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1616 set_bit(STRIPE_HANDLE, &sh->state);
1617 }
1618 }
1619}
1620
858119e1 1621static void raid5_activate_delayed(raid5_conf_t *conf)
1da177e4
LT
1622{
1623 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1624 while (!list_empty(&conf->delayed_list)) {
1625 struct list_head *l = conf->delayed_list.next;
1626 struct stripe_head *sh;
1627 sh = list_entry(l, struct stripe_head, lru);
1628 list_del_init(l);
1629 clear_bit(STRIPE_DELAYED, &sh->state);
1630 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1631 atomic_inc(&conf->preread_active_stripes);
1632 list_add_tail(&sh->lru, &conf->handle_list);
1633 }
1634 }
1635}
1636
858119e1 1637static void activate_bit_delay(raid5_conf_t *conf)
72626685
N
1638{
1639 /* device_lock is held */
1640 struct list_head head;
1641 list_add(&head, &conf->bitmap_list);
1642 list_del_init(&conf->bitmap_list);
1643 while (!list_empty(&head)) {
1644 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1645 list_del_init(&sh->lru);
1646 atomic_inc(&sh->count);
1647 __release_stripe(conf, sh);
1648 }
1649}
1650
1da177e4
LT
1651static void unplug_slaves(mddev_t *mddev)
1652{
1653 raid5_conf_t *conf = mddev_to_conf(mddev);
1654 int i;
1655
1656 rcu_read_lock();
1657 for (i=0; i<mddev->raid_disks; i++) {
d6065f7b 1658 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
b2d444d7 1659 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
1da177e4
LT
1660 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1661
1662 atomic_inc(&rdev->nr_pending);
1663 rcu_read_unlock();
1664
1665 if (r_queue->unplug_fn)
1666 r_queue->unplug_fn(r_queue);
1667
1668 rdev_dec_pending(rdev, mddev);
1669 rcu_read_lock();
1670 }
1671 }
1672 rcu_read_unlock();
1673}
1674
1675static void raid5_unplug_device(request_queue_t *q)
1676{
1677 mddev_t *mddev = q->queuedata;
1678 raid5_conf_t *conf = mddev_to_conf(mddev);
1679 unsigned long flags;
1680
1681 spin_lock_irqsave(&conf->device_lock, flags);
1682
72626685
N
1683 if (blk_remove_plug(q)) {
1684 conf->seq_flush++;
1da177e4 1685 raid5_activate_delayed(conf);
72626685 1686 }
1da177e4
LT
1687 md_wakeup_thread(mddev->thread);
1688
1689 spin_unlock_irqrestore(&conf->device_lock, flags);
1690
1691 unplug_slaves(mddev);
1692}
1693
1694static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
1695 sector_t *error_sector)
1696{
1697 mddev_t *mddev = q->queuedata;
1698 raid5_conf_t *conf = mddev_to_conf(mddev);
1699 int i, ret = 0;
1700
1701 rcu_read_lock();
1702 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
d6065f7b 1703 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
b2d444d7 1704 if (rdev && !test_bit(Faulty, &rdev->flags)) {
1da177e4
LT
1705 struct block_device *bdev = rdev->bdev;
1706 request_queue_t *r_queue = bdev_get_queue(bdev);
1707
1708 if (!r_queue->issue_flush_fn)
1709 ret = -EOPNOTSUPP;
1710 else {
1711 atomic_inc(&rdev->nr_pending);
1712 rcu_read_unlock();
1713 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
1714 error_sector);
1715 rdev_dec_pending(rdev, mddev);
1716 rcu_read_lock();
1717 }
1718 }
1719 }
1720 rcu_read_unlock();
1721 return ret;
1722}
1723
1724static inline void raid5_plug_device(raid5_conf_t *conf)
1725{
1726 spin_lock_irq(&conf->device_lock);
1727 blk_plug_device(conf->mddev->queue);
1728 spin_unlock_irq(&conf->device_lock);
1729}
1730
7ecaa1e6 1731static int make_request(request_queue_t *q, struct bio * bi)
1da177e4
LT
1732{
1733 mddev_t *mddev = q->queuedata;
1734 raid5_conf_t *conf = mddev_to_conf(mddev);
1da177e4
LT
1735 unsigned int dd_idx, pd_idx;
1736 sector_t new_sector;
1737 sector_t logical_sector, last_sector;
1738 struct stripe_head *sh;
a362357b 1739 const int rw = bio_data_dir(bi);
1da177e4 1740
e5dcdd80
N
1741 if (unlikely(bio_barrier(bi))) {
1742 bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
1743 return 0;
1744 }
1745
3d310eb7 1746 md_write_start(mddev, bi);
06d91a5f 1747
a362357b
JA
1748 disk_stat_inc(mddev->gendisk, ios[rw]);
1749 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
1da177e4
LT
1750
1751 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
1752 last_sector = bi->bi_sector + (bi->bi_size>>9);
1753 bi->bi_next = NULL;
1754 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
06d91a5f 1755
1da177e4
LT
1756 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1757 DEFINE_WAIT(w);
7ecaa1e6 1758 int disks;
1da177e4 1759
7ecaa1e6
N
1760 retry:
1761 if (likely(conf->expand_progress == MaxSector))
1762 disks = conf->raid_disks;
1763 else {
1764 spin_lock_irq(&conf->device_lock);
1765 disks = conf->raid_disks;
1766 if (logical_sector >= conf->expand_progress)
1767 disks = conf->previous_raid_disks;
1768 spin_unlock_irq(&conf->device_lock);
1769 }
1770 new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
1771 &dd_idx, &pd_idx, conf);
1da177e4
LT
1772 PRINTK("raid5: make_request, sector %llu logical %llu\n",
1773 (unsigned long long)new_sector,
1774 (unsigned long long)logical_sector);
1775
1da177e4 1776 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
7ecaa1e6 1777 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
1da177e4 1778 if (sh) {
7ecaa1e6
N
1779 if (unlikely(conf->expand_progress != MaxSector)) {
1780 /* expansion might have moved on while waiting for a
 1781 * stripe, so we must do the range check again.
1782 */
1783 int must_retry = 0;
1784 spin_lock_irq(&conf->device_lock);
1785 if (logical_sector < conf->expand_progress &&
1786 disks == conf->previous_raid_disks)
1787 /* mismatch, need to try again */
1788 must_retry = 1;
1789 spin_unlock_irq(&conf->device_lock);
1790 if (must_retry) {
1791 release_stripe(sh);
1792 goto retry;
1793 }
1794 }
1795
1796 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
1797 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1798 /* Stripe is busy expanding or
1799 * add failed due to overlap. Flush everything
1da177e4
LT
1800 * and wait a while
1801 */
1802 raid5_unplug_device(mddev->queue);
1803 release_stripe(sh);
1804 schedule();
1805 goto retry;
1806 }
1807 finish_wait(&conf->wait_for_overlap, &w);
1808 raid5_plug_device(conf);
1809 handle_stripe(sh);
1810 release_stripe(sh);
1da177e4
LT
1811 } else {
1812 /* cannot get stripe for read-ahead, just give up */
1813 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1814 finish_wait(&conf->wait_for_overlap, &w);
1815 break;
1816 }
1817
1818 }
1819 spin_lock_irq(&conf->device_lock);
1820 if (--bi->bi_phys_segments == 0) {
1821 int bytes = bi->bi_size;
1822
1823 if ( bio_data_dir(bi) == WRITE )
1824 md_write_end(mddev);
1825 bi->bi_size = 0;
1826 bi->bi_end_io(bi, bytes, 0);
1827 }
1828 spin_unlock_irq(&conf->device_lock);
1829 return 0;
1830}
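/*
 * Editor's sketch (not part of the driver): why make_request() re-checks
 * expand_progress after get_active_stripe().  The geometry (old vs. new
 * disk count) is sampled, the stripe is obtained (possibly sleeping), and
 * the sample is then re-validated under the lock; if the reshape advanced
 * past this sector in the meantime, the mapping used the wrong geometry and
 * must be redone.  Illustrative names; locking reduced to comments.
 */
#include <stdint.h>

struct conf_model {
	uint64_t expand_progress;          /* MaxSector when no reshape is running */
	int raid_disks, previous_raid_disks;
};
#define MODEL_MAX_SECTOR (~(uint64_t)0)

static int pick_disks(struct conf_model *c, uint64_t logical_sector)
{
	/* in the driver this is read under conf->device_lock */
	if (c->expand_progress == MODEL_MAX_SECTOR ||
	    logical_sector < c->expand_progress)
		return c->raid_disks;              /* region already reshaped */
	return c->previous_raid_disks;             /* region still in the old layout */
}

static int mapping_still_valid(struct conf_model *c, uint64_t logical_sector,
			       int disks_used)
{
	/* re-check under the lock: if the sector has since been reshaped but
	 * was mapped with the old geometry, the caller must retry */
	if (logical_sector < c->expand_progress &&
	    disks_used == c->previous_raid_disks)
		return 0;
	return 1;
}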
1831
1832/* FIXME go_faster isn't used */
57afd89f 1833static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1da177e4
LT
1834{
1835 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1836 struct stripe_head *sh;
ccfcc3c1
N
1837 int pd_idx;
1838 sector_t first_sector, last_sector;
1da177e4
LT
1839 int raid_disks = conf->raid_disks;
1840 int data_disks = raid_disks-1;
72626685
N
1841 sector_t max_sector = mddev->size << 1;
1842 int sync_blocks;
1da177e4 1843
72626685 1844 if (sector_nr >= max_sector) {
1da177e4
LT
1845 /* just being told to finish up .. nothing much to do */
1846 unplug_slaves(mddev);
72626685
N
1847
1848 if (mddev->curr_resync < max_sector) /* aborted */
1849 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1850 &sync_blocks, 1);
1851 else /* completed sync */
1852 conf->fullsync = 0;
1853 bitmap_close_sync(mddev->bitmap);
1854
1da177e4
LT
1855 return 0;
1856 }
ccfcc3c1
N
1857
1858 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
1859 /* reshaping is quite different from recovery/resync, so it is
1860 * handled separately ... here.
1861 *
1862 * On each call to sync_request, we gather one chunk worth of
1863 * destination stripes and flag them as expanding.
1864 * Then we find all the source stripes and request reads.
1865 * As the reads complete, handle_stripe will copy the data
1866 * into the destination stripe and release that stripe.
1867 */
1868 int i;
1869 int dd_idx;
1870 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
1871 int j;
1872 int skipped = 0;
1873 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
1874 sh = get_active_stripe(conf, sector_nr+i,
1875 conf->raid_disks, pd_idx, 0);
1876 set_bit(STRIPE_EXPANDING, &sh->state);
1877 /* If any of this stripe is beyond the end of the old
1878 * array, then we need to zero those blocks
1879 */
1880 for (j=sh->disks; j--;) {
1881 sector_t s;
1882 if (j == sh->pd_idx)
1883 continue;
1884 s = compute_blocknr(sh, j);
1885 if (s < (mddev->array_size<<1)) {
1886 skipped = 1;
1887 continue;
1888 }
1889 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
1890 set_bit(R5_Expanded, &sh->dev[j].flags);
1891 set_bit(R5_UPTODATE, &sh->dev[j].flags);
1892 }
1893 if (!skipped) {
1894 set_bit(STRIPE_EXPAND_READY, &sh->state);
1895 set_bit(STRIPE_HANDLE, &sh->state);
1896 }
1897 release_stripe(sh);
1898 }
1899 spin_lock_irq(&conf->device_lock);
1900 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
1901 spin_unlock_irq(&conf->device_lock);
1902 /* Ok, those stripes are ready. We can start scheduling
1903 * reads on the source stripes.
1904 * The source stripes are determined by mapping the first and last
1905 * block on the destination stripes.
1906 */
1907 raid_disks = conf->previous_raid_disks;
1908 data_disks = raid_disks - 1;
1909 first_sector =
1910 raid5_compute_sector(sector_nr*(conf->raid_disks-1),
1911 raid_disks, data_disks,
1912 &dd_idx, &pd_idx, conf);
1913 last_sector =
1914 raid5_compute_sector((sector_nr+conf->chunk_size/512)
1915 *(conf->raid_disks-1) -1,
1916 raid_disks, data_disks,
1917 &dd_idx, &pd_idx, conf);
1918 if (last_sector >= (mddev->size<<1))
1919 last_sector = (mddev->size<<1)-1;
1920 while (first_sector <= last_sector) {
1921 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
1922 sh = get_active_stripe(conf, first_sector,
1923 conf->previous_raid_disks, pd_idx, 0);
1924 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1925 set_bit(STRIPE_HANDLE, &sh->state);
1926 release_stripe(sh);
1927 first_sector += STRIPE_SECTORS;
1928 }
1929 return conf->chunk_size>>9;
1930 }
1da177e4
LT
1931 /* if there are one or more failed drives and we are trying
1932 * to resync, then assert that we are finished, because there is
1933 * nothing we can do.
1934 */
1935 if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
57afd89f
N
1936 sector_t rv = (mddev->size << 1) - sector_nr;
1937 *skipped = 1;
1da177e4
LT
1938 return rv;
1939 }
72626685 1940 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
3855ad9f 1941 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
72626685
N
1942 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1943 /* we can skip this block, and probably more */
1944 sync_blocks /= STRIPE_SECTORS;
1945 *skipped = 1;
1946 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
1947 }
1da177e4 1948
ccfcc3c1 1949 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
7ecaa1e6 1950 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
1da177e4 1951 if (sh == NULL) {
7ecaa1e6 1952 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
1da177e4
LT
1953 /* make sure we don't swamp the stripe cache if someone else
1954 * is trying to get access
1955 */
66c006a5 1956 schedule_timeout_uninterruptible(1);
1da177e4 1957 }
72626685 1958 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
1da177e4
LT
1959 spin_lock(&sh->lock);
1960 set_bit(STRIPE_SYNCING, &sh->state);
1961 clear_bit(STRIPE_INSYNC, &sh->state);
1962 spin_unlock(&sh->lock);
1963
1964 handle_stripe(sh);
1965 release_stripe(sh);
1966
1967 return STRIPE_SECTORS;
1968}
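/*
 * Editor's sketch (not part of the driver): the arithmetic behind the
 * reshape pass above.  sector_nr is an offset within each member device in
 * the *new* layout; one chunk of destination stripes is taken per call, and
 * expand_progress is then advanced to the corresponding *array* sector,
 * i.e. the per-device offset times the number of data disks in the new
 * layout.  The source range re-read from the old layout is found by mapping
 * the first and last array sectors of that chunk (the driver then converts
 * them to per-device sectors via raid5_compute_sector()).  Hypothetical
 * numbers (growing to 4 drives, 64KiB chunk) for illustration only:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t chunk_sectors = 64 * 1024 / 512;    /* 64 KiB chunk */
	int new_data_disks = 4 - 1;                  /* growing to 4 drives */
	uint64_t sector_nr = 0;                      /* per-device reshape offset */

	/* after flagging one chunk of destination stripes as expanding: */
	uint64_t expand_progress =
		(sector_nr + chunk_sectors) * new_data_disks;

	/* array sectors whose data must be read back from the old layout */
	uint64_t first_array_sector = sector_nr * new_data_disks;
	uint64_t last_array_sector  =
		(sector_nr + chunk_sectors) * new_data_disks - 1;

	printf("expand_progress advances to array sector %llu\n",
	       (unsigned long long)expand_progress);
	printf("source data spans array sectors %llu..%llu\n",
	       (unsigned long long)first_array_sector,
	       (unsigned long long)last_array_sector);
	return 0;
}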
1969
1970/*
1971 * This is our raid5 kernel thread.
1972 *
1973 * We scan the hash table for stripes which can be handled now.
1974 * During the scan, completed stripes are saved for us by the interrupt
1975 * handler, so that they will not have to wait for our next wakeup.
1976 */
1977static void raid5d (mddev_t *mddev)
1978{
1979 struct stripe_head *sh;
1980 raid5_conf_t *conf = mddev_to_conf(mddev);
1981 int handled;
1982
1983 PRINTK("+++ raid5d active\n");
1984
1985 md_check_recovery(mddev);
1da177e4
LT
1986
1987 handled = 0;
1988 spin_lock_irq(&conf->device_lock);
1989 while (1) {
1990 struct list_head *first;
1991
72626685
N
1992 if (conf->seq_flush - conf->seq_write > 0) {
1993 int seq = conf->seq_flush;
700e432d 1994 spin_unlock_irq(&conf->device_lock);
72626685 1995 bitmap_unplug(mddev->bitmap);
700e432d 1996 spin_lock_irq(&conf->device_lock);
72626685
N
1997 conf->seq_write = seq;
1998 activate_bit_delay(conf);
1999 }
2000
1da177e4
LT
2001 if (list_empty(&conf->handle_list) &&
2002 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
2003 !blk_queue_plugged(mddev->queue) &&
2004 !list_empty(&conf->delayed_list))
2005 raid5_activate_delayed(conf);
2006
2007 if (list_empty(&conf->handle_list))
2008 break;
2009
2010 first = conf->handle_list.next;
2011 sh = list_entry(first, struct stripe_head, lru);
2012
2013 list_del_init(first);
2014 atomic_inc(&sh->count);
2015 if (atomic_read(&sh->count)!= 1)
2016 BUG();
2017 spin_unlock_irq(&conf->device_lock);
2018
2019 handled++;
2020 handle_stripe(sh);
2021 release_stripe(sh);
2022
2023 spin_lock_irq(&conf->device_lock);
2024 }
2025 PRINTK("%d stripes handled\n", handled);
2026
2027 spin_unlock_irq(&conf->device_lock);
2028
2029 unplug_slaves(mddev);
2030
2031 PRINTK("--- raid5d inactive\n");
2032}
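/*
 * Editor's sketch (not part of the driver): the shape of the raid5d loop
 * above -- pop one item from the work list while holding the lock, drop the
 * lock to do the (possibly slow) work, then re-take the lock for the next
 * iteration.  Generic user-space model with illustrative names:
 */
#include <pthread.h>
#include <stddef.h>

struct work_model { struct work_model *next; };
struct ctx_model {
	pthread_mutex_t lock;
	struct work_model *handle_list;
};

static void handle(struct work_model *w) { (void)w; } /* stand-in for handle_stripe() */

static int service(struct ctx_model *c)
{
	int handled = 0;
	pthread_mutex_lock(&c->lock);
	while (c->handle_list) {
		struct work_model *w = c->handle_list;
		c->handle_list = w->next;          /* like list_del_init() */
		pthread_mutex_unlock(&c->lock);    /* never handle under the lock */
		handle(w);
		handled++;
		pthread_mutex_lock(&c->lock);
	}
	pthread_mutex_unlock(&c->lock);
	return handled;
}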
2033
3f294f4f 2034static ssize_t
007583c9 2035raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
3f294f4f 2036{
007583c9 2037 raid5_conf_t *conf = mddev_to_conf(mddev);
96de1e66
N
2038 if (conf)
2039 return sprintf(page, "%d\n", conf->max_nr_stripes);
2040 else
2041 return 0;
3f294f4f
N
2042}
2043
2044static ssize_t
007583c9 2045raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
3f294f4f 2046{
007583c9 2047 raid5_conf_t *conf = mddev_to_conf(mddev);
3f294f4f
N
2048 char *end;
2049 int new;
2050 if (len >= PAGE_SIZE)
2051 return -EINVAL;
96de1e66
N
2052 if (!conf)
2053 return -ENODEV;
3f294f4f
N
2054
2055 new = simple_strtoul(page, &end, 10);
2056 if (!*page || (*end && *end != '\n') )
2057 return -EINVAL;
2058 if (new <= 16 || new > 32768)
2059 return -EINVAL;
2060 while (new < conf->max_nr_stripes) {
2061 if (drop_one_stripe(conf))
2062 conf->max_nr_stripes--;
2063 else
2064 break;
2065 }
2066 while (new > conf->max_nr_stripes) {
2067 if (grow_one_stripe(conf))
2068 conf->max_nr_stripes++;
2069 else break;
2070 }
2071 return len;
2072}
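/*
 * Editor's sketch (not part of the driver): the store routine above walks
 * the cache one stripe at a time toward the requested size rather than
 * reallocating in one go, so a failure part-way simply leaves a smaller
 * cache in place.  Minimal model with stand-in grow/drop helpers:
 */
#include <stdbool.h>

static bool grow_one_stripe_model(void) { return true; }   /* stand-in: allocate one stripe */
static bool drop_one_stripe_model(void) { return true; }   /* stand-in: free one idle stripe */

static int resize_cache(int *max_nr_stripes, int new)
{
	if (new <= 16 || new > 32768)
		return -1;                     /* same bounds as the code above */
	while (new < *max_nr_stripes) {
		if (!drop_one_stripe_model())
			break;                 /* keep whatever we managed */
		(*max_nr_stripes)--;
	}
	while (new > *max_nr_stripes) {
		if (!grow_one_stripe_model())
			break;
		(*max_nr_stripes)++;
	}
	return 0;
}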
007583c9 2073
96de1e66
N
2074static struct md_sysfs_entry
2075raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
2076 raid5_show_stripe_cache_size,
2077 raid5_store_stripe_cache_size);
3f294f4f
N
2078
2079static ssize_t
96de1e66 2080stripe_cache_active_show(mddev_t *mddev, char *page)
3f294f4f 2081{
007583c9 2082 raid5_conf_t *conf = mddev_to_conf(mddev);
96de1e66
N
2083 if (conf)
2084 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
2085 else
2086 return 0;
3f294f4f
N
2087}
2088
96de1e66
N
2089static struct md_sysfs_entry
2090raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
3f294f4f 2091
007583c9 2092static struct attribute *raid5_attrs[] = {
3f294f4f
N
2093 &raid5_stripecache_size.attr,
2094 &raid5_stripecache_active.attr,
2095 NULL,
2096};
007583c9
N
2097static struct attribute_group raid5_attrs_group = {
2098 .name = NULL,
2099 .attrs = raid5_attrs,
3f294f4f
N
2100};
2101
72626685 2102static int run(mddev_t *mddev)
1da177e4
LT
2103{
2104 raid5_conf_t *conf;
2105 int raid_disk, memory;
2106 mdk_rdev_t *rdev;
2107 struct disk_info *disk;
2108 struct list_head *tmp;
2109
2110 if (mddev->level != 5 && mddev->level != 4) {
14f8d26b
N
2111 printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
2112 mdname(mddev), mddev->level);
1da177e4
LT
2113 return -EIO;
2114 }
2115
b55e6bfc 2116 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
1da177e4
LT
2117 if ((conf = mddev->private) == NULL)
2118 goto abort;
b55e6bfc
N
2119 conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
2120 GFP_KERNEL);
2121 if (!conf->disks)
2122 goto abort;
9ffae0cf 2123
1da177e4
LT
2124 conf->mddev = mddev;
2125
fccddba0 2126 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1da177e4 2127 goto abort;
1da177e4
LT
2128
2129 spin_lock_init(&conf->device_lock);
2130 init_waitqueue_head(&conf->wait_for_stripe);
2131 init_waitqueue_head(&conf->wait_for_overlap);
2132 INIT_LIST_HEAD(&conf->handle_list);
2133 INIT_LIST_HEAD(&conf->delayed_list);
72626685 2134 INIT_LIST_HEAD(&conf->bitmap_list);
1da177e4
LT
2135 INIT_LIST_HEAD(&conf->inactive_list);
2136 atomic_set(&conf->active_stripes, 0);
2137 atomic_set(&conf->preread_active_stripes, 0);
2138
1da177e4
LT
2139 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
2140
2141 ITERATE_RDEV(mddev,rdev,tmp) {
2142 raid_disk = rdev->raid_disk;
2143 if (raid_disk >= mddev->raid_disks
2144 || raid_disk < 0)
2145 continue;
2146 disk = conf->disks + raid_disk;
2147
2148 disk->rdev = rdev;
2149
b2d444d7 2150 if (test_bit(In_sync, &rdev->flags)) {
1da177e4
LT
2151 char b[BDEVNAME_SIZE];
2152 printk(KERN_INFO "raid5: device %s operational as raid"
2153 " disk %d\n", bdevname(rdev->bdev,b),
2154 raid_disk);
2155 conf->working_disks++;
2156 }
2157 }
2158
2159 conf->raid_disks = mddev->raid_disks;
2160 /*
2161 * 0 for a fully functional array, 1 for a degraded array.
2162 */
2163 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
2164 conf->mddev = mddev;
2165 conf->chunk_size = mddev->chunk_size;
2166 conf->level = mddev->level;
2167 conf->algorithm = mddev->layout;
2168 conf->max_nr_stripes = NR_STRIPES;
7ecaa1e6 2169 conf->expand_progress = MaxSector;
1da177e4
LT
2170
2171 /* device size must be a multiple of chunk size */
2172 mddev->size &= ~(mddev->chunk_size/1024 -1);
b1581566 2173 mddev->resync_max_sectors = mddev->size << 1;
1da177e4
LT
2174
2175 if (!conf->chunk_size || conf->chunk_size % 4) {
2176 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
2177 conf->chunk_size, mdname(mddev));
2178 goto abort;
2179 }
2180 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
2181 printk(KERN_ERR
2182 "raid5: unsupported parity algorithm %d for %s\n",
2183 conf->algorithm, mdname(mddev));
2184 goto abort;
2185 }
2186 if (mddev->degraded > 1) {
2187 printk(KERN_ERR "raid5: not enough operational devices for %s"
2188 " (%d/%d failed)\n",
2189 mdname(mddev), conf->failed_disks, conf->raid_disks);
2190 goto abort;
2191 }
2192
2193 if (mddev->degraded == 1 &&
2194 mddev->recovery_cp != MaxSector) {
6ff8d8ec
N
2195 if (mddev->ok_start_degraded)
2196 printk(KERN_WARNING
2197 "raid5: starting dirty degraded array: %s"
2198 "- data corruption possible.\n",
2199 mdname(mddev));
2200 else {
2201 printk(KERN_ERR
2202 "raid5: cannot start dirty degraded array for %s\n",
2203 mdname(mddev));
2204 goto abort;
2205 }
1da177e4
LT
2206 }
2207
2208 {
2209 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
2210 if (!mddev->thread) {
2211 printk(KERN_ERR
2212 "raid5: couldn't allocate thread for %s\n",
2213 mdname(mddev));
2214 goto abort;
2215 }
2216 }
5036805b 2217 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
1da177e4
LT
2218 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
2219 if (grow_stripes(conf, conf->max_nr_stripes)) {
2220 printk(KERN_ERR
2221 "raid5: couldn't allocate %dkB for buffers\n", memory);
2222 shrink_stripes(conf);
2223 md_unregister_thread(mddev->thread);
2224 goto abort;
2225 } else
2226 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
2227 memory, mdname(mddev));
2228
2229 if (mddev->degraded == 0)
2230 printk("raid5: raid level %d set %s active with %d out of %d"
2231 " devices, algorithm %d\n", conf->level, mdname(mddev),
2232 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
2233 conf->algorithm);
2234 else
2235 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
2236 " out of %d devices, algorithm %d\n", conf->level,
2237 mdname(mddev), mddev->raid_disks - mddev->degraded,
2238 mddev->raid_disks, conf->algorithm);
2239
2240 print_raid5_conf(conf);
2241
2242 /* read-ahead size must cover two whole stripes, which is
2243 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
2244 */
2245 {
2246 int stripe = (mddev->raid_disks-1) * mddev->chunk_size
2d1f3b5d 2247 / PAGE_SIZE;
1da177e4
LT
2248 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
2249 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
2250 }
2251
2252 /* Ok, everything is just fine now */
007583c9 2253 sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
7a5febe9
N
2254
2255 mddev->queue->unplug_fn = raid5_unplug_device;
2256 mddev->queue->issue_flush_fn = raid5_issue_flush;
2257
1da177e4
LT
2258 mddev->array_size = mddev->size * (mddev->raid_disks - 1);
2259 return 0;
2260abort:
2261 if (conf) {
2262 print_raid5_conf(conf);
b55e6bfc 2263 kfree(conf->disks);
fccddba0 2264 kfree(conf->stripe_hashtbl);
1da177e4
LT
2265 kfree(conf);
2266 }
2267 mddev->private = NULL;
2268 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
2269 return -EIO;
2270}
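/*
 * Editor's sketch (not part of the driver): the two sizing calculations in
 * run(), worked with hypothetical numbers (6 drives, 64KiB chunks, 4KiB
 * pages, 256 cached stripes; the struct sizes below are made-up
 * placeholders, not the real sizeof values).
 */
#include <stdio.h>

int main(void)
{
	int raid_disks = 6, chunk_size = 64 * 1024, page_size = 4096;
	int max_nr_stripes = 256;
	int sizeof_stripe_head = 256, sizeof_bio = 128;   /* placeholders only */

	/* stripe cache memory, in KiB, as printed by run() */
	int memory = max_nr_stripes * (sizeof_stripe_head +
		     raid_disks * (sizeof_bio + page_size)) / 1024;

	/* read-ahead must cover two full stripes: 2 * (n-1) * chunk_size,
	 * expressed in pages */
	int stripe_pages = (raid_disks - 1) * chunk_size / page_size;
	int ra_pages = 2 * stripe_pages;

	printf("stripe cache ~%d KiB, read-ahead %d pages\n", memory, ra_pages);
	return 0;
}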
2271
2272
2273
3f294f4f 2274static int stop(mddev_t *mddev)
1da177e4
LT
2275{
2276 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2277
2278 md_unregister_thread(mddev->thread);
2279 mddev->thread = NULL;
2280 shrink_stripes(conf);
fccddba0 2281 kfree(conf->stripe_hashtbl);
1da177e4 2282 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
007583c9 2283 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
b55e6bfc 2284 kfree(conf->disks);
96de1e66 2285 kfree(conf);
1da177e4
LT
2286 mddev->private = NULL;
2287 return 0;
2288}
2289
2290#if RAID5_DEBUG
2291static void print_sh (struct stripe_head *sh)
2292{
2293 int i;
2294
2295 printk("sh %llu, pd_idx %d, state %ld.\n",
2296 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
2297 printk("sh %llu, count %d.\n",
2298 (unsigned long long)sh->sector, atomic_read(&sh->count));
2299 printk("sh %llu, ", (unsigned long long)sh->sector);
7ecaa1e6 2300 for (i = 0; i < sh->disks; i++) {
1da177e4
LT
2301 printk("(cache%d: %p %ld) ",
2302 i, sh->dev[i].page, sh->dev[i].flags);
2303 }
2304 printk("\n");
2305}
2306
2307static void printall (raid5_conf_t *conf)
2308{
2309 struct stripe_head *sh;
fccddba0 2310 struct hlist_node *hn;
1da177e4
LT
2311 int i;
2312
2313 spin_lock_irq(&conf->device_lock);
2314 for (i = 0; i < NR_HASH; i++) {
fccddba0 2315 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
1da177e4
LT
2316 if (sh->raid_conf != conf)
2317 continue;
2318 print_sh(sh);
2319 }
2320 }
2321 spin_unlock_irq(&conf->device_lock);
2322}
2323#endif
2324
2325static void status (struct seq_file *seq, mddev_t *mddev)
2326{
2327 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2328 int i;
2329
2330 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2331 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2332 for (i = 0; i < conf->raid_disks; i++)
2333 seq_printf (seq, "%s",
2334 conf->disks[i].rdev &&
b2d444d7 2335 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
1da177e4
LT
2336 seq_printf (seq, "]");
2337#if RAID5_DEBUG
2338#define D(x) \
2339 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
2340 printall(conf);
2341#endif
2342}
2343
2344static void print_raid5_conf (raid5_conf_t *conf)
2345{
2346 int i;
2347 struct disk_info *tmp;
2348
2349 printk("RAID5 conf printout:\n");
2350 if (!conf) {
2351 printk("(conf==NULL)\n");
2352 return;
2353 }
2354 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2355 conf->working_disks, conf->failed_disks);
2356
2357 for (i = 0; i < conf->raid_disks; i++) {
2358 char b[BDEVNAME_SIZE];
2359 tmp = conf->disks + i;
2360 if (tmp->rdev)
2361 printk(" disk %d, o:%d, dev:%s\n",
b2d444d7 2362 i, !test_bit(Faulty, &tmp->rdev->flags),
1da177e4
LT
2363 bdevname(tmp->rdev->bdev,b));
2364 }
2365}
2366
2367static int raid5_spare_active(mddev_t *mddev)
2368{
2369 int i;
2370 raid5_conf_t *conf = mddev->private;
2371 struct disk_info *tmp;
2372
2373 for (i = 0; i < conf->raid_disks; i++) {
2374 tmp = conf->disks + i;
2375 if (tmp->rdev
b2d444d7
N
2376 && !test_bit(Faulty, &tmp->rdev->flags)
2377 && !test_bit(In_sync, &tmp->rdev->flags)) {
1da177e4
LT
2378 mddev->degraded--;
2379 conf->failed_disks--;
2380 conf->working_disks++;
b2d444d7 2381 set_bit(In_sync, &tmp->rdev->flags);
1da177e4
LT
2382 }
2383 }
2384 print_raid5_conf(conf);
2385 return 0;
2386}
2387
2388static int raid5_remove_disk(mddev_t *mddev, int number)
2389{
2390 raid5_conf_t *conf = mddev->private;
2391 int err = 0;
2392 mdk_rdev_t *rdev;
2393 struct disk_info *p = conf->disks + number;
2394
2395 print_raid5_conf(conf);
2396 rdev = p->rdev;
2397 if (rdev) {
b2d444d7 2398 if (test_bit(In_sync, &rdev->flags) ||
1da177e4
LT
2399 atomic_read(&rdev->nr_pending)) {
2400 err = -EBUSY;
2401 goto abort;
2402 }
2403 p->rdev = NULL;
fbd568a3 2404 synchronize_rcu();
1da177e4
LT
2405 if (atomic_read(&rdev->nr_pending)) {
2406 /* lost the race, try later */
2407 err = -EBUSY;
2408 p->rdev = rdev;
2409 }
2410 }
2411abort:
2412
2413 print_raid5_conf(conf);
2414 return err;
2415}
2416
2417static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2418{
2419 raid5_conf_t *conf = mddev->private;
2420 int found = 0;
2421 int disk;
2422 struct disk_info *p;
2423
2424 if (mddev->degraded > 1)
2425 /* no point adding a device */
2426 return 0;
2427
2428 /*
2429 * find the disk ...
2430 */
2431 for (disk=0; disk < mddev->raid_disks; disk++)
2432 if ((p=conf->disks + disk)->rdev == NULL) {
b2d444d7 2433 clear_bit(In_sync, &rdev->flags);
1da177e4
LT
2434 rdev->raid_disk = disk;
2435 found = 1;
72626685
N
2436 if (rdev->saved_raid_disk != disk)
2437 conf->fullsync = 1;
d6065f7b 2438 rcu_assign_pointer(p->rdev, rdev);
1da177e4
LT
2439 break;
2440 }
2441 print_raid5_conf(conf);
2442 return found;
2443}
2444
2445static int raid5_resize(mddev_t *mddev, sector_t sectors)
2446{
2447 /* no resync is happening, and there is enough space
2448 * on all devices, so we can resize.
2449 * We need to make sure resync covers any new space.
2450 * If the array is shrinking we should possibly wait until
2451 * any io in the removed space completes, but it hardly seems
2452 * worth it.
2453 */
2454 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2455 mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
2456 set_capacity(mddev->gendisk, mddev->array_size << 1);
2457 mddev->changed = 1;
2458 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
2459 mddev->recovery_cp = mddev->size << 1;
2460 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2461 }
2462 mddev->size = sectors /2;
4b5c7ae8 2463 mddev->resync_max_sectors = sectors;
1da177e4
LT
2464 return 0;
2465}
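/*
 * Editor's sketch (not part of the driver): the resize arithmetic above,
 * with hypothetical numbers (4 drives, 64KiB chunk, growing each member
 * from 100000 KiB to 200000 KiB).  Units follow md's conventions: "sectors"
 * are 512-byte units per member device, mddev->size and array_size are KiB.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int raid_disks = 4;
	uint64_t chunk_size = 64 * 1024;            /* bytes */
	uint64_t old_size_kb = 100000;              /* per-device, KiB */
	uint64_t new_sectors = 200000 * 2;          /* per-device, sectors */

	/* device size must be a multiple of the chunk size */
	new_sectors &= ~(chunk_size / 512 - 1);

	uint64_t array_size_kb = new_sectors * (raid_disks - 1) / 2;
	uint64_t new_size_kb = new_sectors / 2;

	printf("array_size = %llu KiB\n", (unsigned long long)array_size_kb);
	if (new_size_kb > old_size_kb)
		/* resync must cover the newly added space, so recovery_cp is
		 * pulled back to the old end of the device */
		printf("recovery_cp = %llu (old end, in sectors)\n",
		       (unsigned long long)(old_size_kb * 2));
	return 0;
}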
2466
72626685
N
2467static void raid5_quiesce(mddev_t *mddev, int state)
2468{
2469 raid5_conf_t *conf = mddev_to_conf(mddev);
2470
2471 switch(state) {
2472 case 1: /* stop all writes */
2473 spin_lock_irq(&conf->device_lock);
2474 conf->quiesce = 1;
2475 wait_event_lock_irq(conf->wait_for_stripe,
2476 atomic_read(&conf->active_stripes) == 0,
2477 conf->device_lock, /* nothing */);
2478 spin_unlock_irq(&conf->device_lock);
2479 break;
2480
2481 case 0: /* re-enable writes */
2482 spin_lock_irq(&conf->device_lock);
2483 conf->quiesce = 0;
2484 wake_up(&conf->wait_for_stripe);
2485 spin_unlock_irq(&conf->device_lock);
2486 break;
2487 }
72626685 2488}
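/*
 * Editor's sketch (not part of the driver): the quiesce handshake above,
 * modelled with a pthread condition variable.  Illustrative names only; in
 * the driver the wait releases and re-takes device_lock via
 * wait_event_lock_irq(), and the wake-up comes from the stripe-release path.
 */
#include <pthread.h>

struct quiesce_model {
	pthread_mutex_t lock;
	pthread_cond_t  cv;
	int quiesce;
	int active_stripes;
};

static void quiesce_set(struct quiesce_model *c, int state)
{
	pthread_mutex_lock(&c->lock);
	if (state == 1) {                       /* stop all writes */
		c->quiesce = 1;
		while (c->active_stripes != 0)  /* wait for in-flight stripes */
			pthread_cond_wait(&c->cv, &c->lock);
	} else {                                /* re-enable writes */
		c->quiesce = 0;
		pthread_cond_broadcast(&c->cv); /* wake anyone held back */
	}
	pthread_mutex_unlock(&c->lock);
}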
b15c2e57 2489
2604b703 2490static struct mdk_personality raid5_personality =
1da177e4
LT
2491{
2492 .name = "raid5",
2604b703 2493 .level = 5,
1da177e4
LT
2494 .owner = THIS_MODULE,
2495 .make_request = make_request,
2496 .run = run,
2497 .stop = stop,
2498 .status = status,
2499 .error_handler = error,
2500 .hot_add_disk = raid5_add_disk,
2501 .hot_remove_disk= raid5_remove_disk,
2502 .spare_active = raid5_spare_active,
2503 .sync_request = sync_request,
2504 .resize = raid5_resize,
72626685 2505 .quiesce = raid5_quiesce,
1da177e4
LT
2506};
2507
2604b703 2508static struct mdk_personality raid4_personality =
1da177e4 2509{
2604b703
N
2510 .name = "raid4",
2511 .level = 4,
2512 .owner = THIS_MODULE,
2513 .make_request = make_request,
2514 .run = run,
2515 .stop = stop,
2516 .status = status,
2517 .error_handler = error,
2518 .hot_add_disk = raid5_add_disk,
2519 .hot_remove_disk= raid5_remove_disk,
2520 .spare_active = raid5_spare_active,
2521 .sync_request = sync_request,
2522 .resize = raid5_resize,
2523 .quiesce = raid5_quiesce,
2524};
2525
2526static int __init raid5_init(void)
2527{
2528 register_md_personality(&raid5_personality);
2529 register_md_personality(&raid4_personality);
2530 return 0;
1da177e4
LT
2531}
2532
2604b703 2533static void raid5_exit(void)
1da177e4 2534{
2604b703
N
2535 unregister_md_personality(&raid5_personality);
2536 unregister_md_personality(&raid4_personality);
1da177e4
LT
2537}
2538
2539module_init(raid5_init);
2540module_exit(raid5_exit);
2541MODULE_LICENSE("GPL");
2542MODULE_ALIAS("md-personality-4"); /* RAID5 */
d9d166c2
N
2543MODULE_ALIAS("md-raid5");
2544MODULE_ALIAS("md-raid4");
2604b703
N
2545MODULE_ALIAS("md-level-5");
2546MODULE_ALIAS("md-level-4");