/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * BITMAP UNPLUGGING.
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 * we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 * batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
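/*
 * A concrete illustration of the batching above (not from the original
 * source, added for clarity): suppose seq_flush == seq_write == 7.  New
 * bitmap updates are recorded with bm_seq == 8.  An unplug closes batch 8
 * (seq_flush becomes 8); once the bitmap updates for batch 8 hit the disk,
 * seq_write is advanced to 8 and the stripes parked on bitmap_list with
 * bm_seq <= 8 may finally be written.
 */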
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <linux/flex_array.h>
#include <trace/events/block.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8
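
/*
 * Worked example (added for clarity, assuming 4 KiB pages on a 64-bit
 * build): STRIPE_SIZE is 4096 bytes and STRIPE_SHIFT is 3, so each stripe
 * unit covers STRIPE_SECTORS == 8 512-byte sectors, and the stripe hash
 * table has NR_HASH == 4096 / 8 == 512 buckets.
 */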
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}
static inline int stripe_hash_locks_hash(sector_t sect)
{
	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
}
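/*
 * Example (added for clarity, assuming NR_STRIPE_HASH_LOCKS is 8 so
 * STRIPE_HASH_LOCKS_MASK is 7): with 4 KiB stripe units, the stripes
 * starting at sectors 0, 8, 16, ... map to hash locks 0, 1, 2, ..., so
 * adjacent stripes spread across different hash locks.
 */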
static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}
static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}
static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	local_irq_disable();
	spin_lock(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}
static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;

	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
		spin_unlock(conf->hash_locks + i - 1);
	local_irq_enable();
}
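
/*
 * Note on lock ordering (added for clarity, derived from the helpers
 * above): a hash lock is always taken before conf->device_lock, and hash
 * lock 0 is the outermost lock when all hash locks are needed, so the
 * unlock helpers release the locks in the reverse order.
 */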
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}
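
/*
 * Sketch of the intended lifecycle (added for clarity): the count is
 * initialised with a bias via raid5_set_bi_stripes(), incremented for
 * every stripe the bio gets attached to, and decremented as each stripe
 * finishes with it; when raid5_dec_bi_active_stripes() returns 0 the bio
 * has no remaining stripe work and can be completed.  The upper 16 bits
 * track how many stripes of a bio have already been processed when a
 * partially handled bio has to be retried.
 */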
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always start from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * We need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}
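
/*
 * Example (added for clarity): on a 6-device RAID-6 with the md layout,
 * pd_idx == 4 and qd_idx == 5, syndrome_disks is 4, so walking from
 * raid6_d0() assigns the four data disks slots 0..3, P slot 4 and Q
 * slot 5, matching the layout expected by the async syndrome code.
 */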
static void return_io(struct bio_list *return_bi)
{
	struct bio *bi;

	while ((bi = bio_list_pop(return_bi)) != NULL) {
		bi->bi_iter.bi_size = 0;
		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
					 bi, 0);
		bio_endio(bi);
	}
}

static void print_raid5_conf (struct r5conf *conf);
static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wakeup more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}
static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes)==0);
	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			   sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				list_add_tail(&sh->lru, &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state))
			list_add_tail(&sh->lru, temp_inactive_list);
	}
}
static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}
/*
 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * given time. Adding stripes only takes device lock, while deleting stripes
 * only takes hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	unsigned long do_wakeup = 0;
	int i = 0;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup |= 1 << hash;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
		if (do_wakeup & (1 << i))
			wake_up(&conf->wait_for_stripe[i]);

	if (atomic_read(&conf->active_stripes) == 0)
		wake_up(&conf->wait_for_quiescent);
	if (conf->retry_read_aligned)
		md_wakeup_thread(conf->mddev->thread);
}
/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry the bit is set here, because if the bit is set
		 * again, the count is always > 1. This is true for
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}
static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
	 */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
		test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	local_irq_save(flags);
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock(&conf->device_lock);
		release_inactive_stripe_list(conf, &list, hash);
	}
	local_irq_restore(flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(conf->inactive_list + hash))
		goto out;
	first = (conf->inactive_list + hash)->next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
	BUG_ON(hash != sh->hash_lock_index);
	if (list_empty(conf->inactive_list + hash))
		atomic_inc(&conf->empty_inactive_list_nr);
out:
	return sh;
}
static void shrink_buffers(struct stripe_head *sh)
{
	struct page *p;
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num ; i++) {
		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}
static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
{
	int i;
	int num = sh->raid_conf->pool_size;

	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(gfp))) {
			return 1;
		}
		sh->dev[i].page = page;
		sh->dev[i].orig_page = page;
	}
	return 0;
}
static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			    struct stripe_head *sh);
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i, seq;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));
	BUG_ON(sh->batch_head);

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sector);
retry:
	seq = read_seqcount_begin(&conf->gen_lock);
	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	if (read_seqcount_retry(&conf->gen_lock, seq))
		goto retry;
	sh->overwrite_disks = 0;
	insert_hash(conf, sh);
	sh->cpu = smp_processor_id();
	set_bit(STRIPE_BATCH_READY, &sh->state);
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}
/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}
static int has_failed(struct r5conf *conf)
{
	int degraded;

	if (conf->mddev->reshape_position == MaxSector)
		return conf->mddev->degraded > conf->max_degraded;

	degraded = calc_degraded(conf);
	if (degraded > conf->max_degraded)
		return 1;
	return 0;
}
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(sector);

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(conf->hash_locks + hash);

	do {
		wait_event_lock_irq(conf->wait_for_quiescent,
				    conf->quiesce == 0 || noquiesce,
				    *(conf->hash_locks + hash));
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
				sh = get_free_stripe(conf, hash);
				if (!sh && !test_bit(R5_DID_ALLOC,
						     &conf->cache_state))
					set_bit(R5_ALLOC_MORE,
						&conf->cache_state);
			}
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				set_bit(R5_INACTIVE_BLOCKED,
					&conf->cache_state);
				wait_event_exclusive_cmd(
					conf->wait_for_stripe[hash],
					!list_empty(conf->inactive_list + hash) &&
					(atomic_read(&conf->active_stripes)
						< (conf->max_nr_stripes * 3 / 4)
					 || !test_bit(R5_INACTIVE_BLOCKED,
						      &conf->cache_state)),
					spin_unlock_irq(conf->hash_locks + hash),
					spin_lock_irq(conf->hash_locks + hash));
				clear_bit(R5_INACTIVE_BLOCKED,
					  &conf->cache_state);
			} else {
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
			}
		} else if (!atomic_inc_not_zero(&sh->count)) {
			spin_lock(&conf->device_lock);
			if (!atomic_read(&sh->count)) {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				BUG_ON(list_empty(&sh->lru) &&
				       !test_bit(STRIPE_EXPANDING, &sh->state));
				list_del_init(&sh->lru);
				if (sh->group) {
					sh->group->stripes_cnt--;
					sh->group = NULL;
				}
			}
			atomic_inc(&sh->count);
			spin_unlock(&conf->device_lock);
		}
	} while (sh == NULL);

	if (!list_empty(conf->inactive_list + hash))
		wake_up(&conf->wait_for_stripe[hash]);

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}
static bool is_full_stripe_write(struct stripe_head *sh)
{
	BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
	return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
}
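
/*
 * Example (added for clarity): on a 6-drive RAID-5, max_degraded is 1,
 * so a stripe counts as a full stripe write once all 6 - 1 == 5 data
 * blocks in it are completely overwritten by pending writes.
 */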
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
	local_irq_disable();
	if (sh1 > sh2) {
		spin_lock(&sh2->stripe_lock);
		spin_lock_nested(&sh1->stripe_lock, 1);
	} else {
		spin_lock(&sh1->stripe_lock);
		spin_lock_nested(&sh2->stripe_lock, 1);
	}
}

static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
{
	spin_unlock(&sh1->stripe_lock);
	spin_unlock(&sh2->stripe_lock);
	local_irq_enable();
}
/* Only a freshly created, full-stripe normal write can be added to a batch list */
static bool stripe_can_batch(struct stripe_head *sh)
{
	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
		is_full_stripe_write(sh);
}
763 /* we only do back search */
764 static void stripe_add_to_batch_list(struct r5conf
*conf
, struct stripe_head
*sh
)
766 struct stripe_head
*head
;
767 sector_t head_sector
, tmp_sec
;
771 if (!stripe_can_batch(sh
))
773 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
774 tmp_sec
= sh
->sector
;
775 if (!sector_div(tmp_sec
, conf
->chunk_sectors
))
777 head_sector
= sh
->sector
- STRIPE_SECTORS
;
779 hash
= stripe_hash_locks_hash(head_sector
);
780 spin_lock_irq(conf
->hash_locks
+ hash
);
781 head
= __find_stripe(conf
, head_sector
, conf
->generation
);
782 if (head
&& !atomic_inc_not_zero(&head
->count
)) {
783 spin_lock(&conf
->device_lock
);
784 if (!atomic_read(&head
->count
)) {
785 if (!test_bit(STRIPE_HANDLE
, &head
->state
))
786 atomic_inc(&conf
->active_stripes
);
787 BUG_ON(list_empty(&head
->lru
) &&
788 !test_bit(STRIPE_EXPANDING
, &head
->state
));
789 list_del_init(&head
->lru
);
791 head
->group
->stripes_cnt
--;
795 atomic_inc(&head
->count
);
796 spin_unlock(&conf
->device_lock
);
798 spin_unlock_irq(conf
->hash_locks
+ hash
);
802 if (!stripe_can_batch(head
))
805 lock_two_stripes(head
, sh
);
806 /* clear_batch_ready clear the flag */
807 if (!stripe_can_batch(head
) || !stripe_can_batch(sh
))
814 while (dd_idx
== sh
->pd_idx
|| dd_idx
== sh
->qd_idx
)
816 if (head
->dev
[dd_idx
].towrite
->bi_rw
!= sh
->dev
[dd_idx
].towrite
->bi_rw
)
819 if (head
->batch_head
) {
820 spin_lock(&head
->batch_head
->batch_lock
);
821 /* This batch list is already running */
822 if (!stripe_can_batch(head
)) {
823 spin_unlock(&head
->batch_head
->batch_lock
);
828 * at this point, head's BATCH_READY could be cleared, but we
829 * can still add the stripe to batch list
831 list_add(&sh
->batch_list
, &head
->batch_list
);
832 spin_unlock(&head
->batch_head
->batch_lock
);
834 sh
->batch_head
= head
->batch_head
;
836 head
->batch_head
= head
;
837 sh
->batch_head
= head
->batch_head
;
838 spin_lock(&head
->batch_lock
);
839 list_add_tail(&sh
->batch_list
, &head
->batch_list
);
840 spin_unlock(&head
->batch_lock
);
843 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
844 if (atomic_dec_return(&conf
->preread_active_stripes
)
846 md_wakeup_thread(conf
->mddev
->thread
);
848 if (test_and_clear_bit(STRIPE_BIT_DELAY
, &sh
->state
)) {
849 int seq
= sh
->bm_seq
;
850 if (test_bit(STRIPE_BIT_DELAY
, &sh
->batch_head
->state
) &&
851 sh
->batch_head
->bm_seq
> seq
)
852 seq
= sh
->batch_head
->bm_seq
;
853 set_bit(STRIPE_BIT_DELAY
, &sh
->batch_head
->state
);
854 sh
->batch_head
->bm_seq
= seq
;
857 atomic_inc(&sh
->count
);
859 unlock_two_stripes(head
, sh
);
861 release_stripe(head
);
/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}
static void
raid5_end_read_request(struct bio *bi);
static void
raid5_end_write_request(struct bio *bi);
890 static void ops_run_io(struct stripe_head
*sh
, struct stripe_head_state
*s
)
892 struct r5conf
*conf
= sh
->raid_conf
;
893 int i
, disks
= sh
->disks
;
894 struct stripe_head
*head_sh
= sh
;
898 for (i
= disks
; i
--; ) {
900 int replace_only
= 0;
901 struct bio
*bi
, *rbi
;
902 struct md_rdev
*rdev
, *rrdev
= NULL
;
905 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
)) {
906 if (test_and_clear_bit(R5_WantFUA
, &sh
->dev
[i
].flags
))
910 if (test_bit(R5_Discard
, &sh
->dev
[i
].flags
))
912 } else if (test_and_clear_bit(R5_Wantread
, &sh
->dev
[i
].flags
))
914 else if (test_and_clear_bit(R5_WantReplace
,
915 &sh
->dev
[i
].flags
)) {
920 if (test_and_clear_bit(R5_SyncIO
, &sh
->dev
[i
].flags
))
924 bi
= &sh
->dev
[i
].req
;
925 rbi
= &sh
->dev
[i
].rreq
; /* For writing to replacement */
928 rrdev
= rcu_dereference(conf
->disks
[i
].replacement
);
929 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
930 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
939 /* We raced and saw duplicates */
942 if (test_bit(R5_ReadRepl
, &head_sh
->dev
[i
].flags
) && rrdev
)
947 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
950 atomic_inc(&rdev
->nr_pending
);
951 if (rrdev
&& test_bit(Faulty
, &rrdev
->flags
))
954 atomic_inc(&rrdev
->nr_pending
);
957 /* We have already checked bad blocks for reads. Now
958 * need to check for writes. We never accept write errors
959 * on the replacement, so we don't to check rrdev.
961 while ((rw
& WRITE
) && rdev
&&
962 test_bit(WriteErrorSeen
, &rdev
->flags
)) {
965 int bad
= is_badblock(rdev
, sh
->sector
, STRIPE_SECTORS
,
966 &first_bad
, &bad_sectors
);
971 set_bit(BlockedBadBlocks
, &rdev
->flags
);
972 if (!conf
->mddev
->external
&&
973 conf
->mddev
->flags
) {
974 /* It is very unlikely, but we might
975 * still need to write out the
976 * bad block log - better give it
978 md_check_recovery(conf
->mddev
);
981 * Because md_wait_for_blocked_rdev
982 * will dec nr_pending, we must
983 * increment it first.
985 atomic_inc(&rdev
->nr_pending
);
986 md_wait_for_blocked_rdev(rdev
, conf
->mddev
);
988 /* Acknowledged bad block - skip the write */
989 rdev_dec_pending(rdev
, conf
->mddev
);
995 if (s
->syncing
|| s
->expanding
|| s
->expanded
997 md_sync_acct(rdev
->bdev
, STRIPE_SECTORS
);
999 set_bit(STRIPE_IO_STARTED
, &sh
->state
);
1002 bi
->bi_bdev
= rdev
->bdev
;
1004 bi
->bi_end_io
= (rw
& WRITE
)
1005 ? raid5_end_write_request
1006 : raid5_end_read_request
;
1007 bi
->bi_private
= sh
;
1009 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
1010 __func__
, (unsigned long long)sh
->sector
,
1012 atomic_inc(&sh
->count
);
1014 atomic_inc(&head_sh
->count
);
1015 if (use_new_offset(conf
, sh
))
1016 bi
->bi_iter
.bi_sector
= (sh
->sector
1017 + rdev
->new_data_offset
);
1019 bi
->bi_iter
.bi_sector
= (sh
->sector
1020 + rdev
->data_offset
);
1021 if (test_bit(R5_ReadNoMerge
, &head_sh
->dev
[i
].flags
))
1022 bi
->bi_rw
|= REQ_NOMERGE
;
1024 if (test_bit(R5_SkipCopy
, &sh
->dev
[i
].flags
))
1025 WARN_ON(test_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
));
1026 sh
->dev
[i
].vec
.bv_page
= sh
->dev
[i
].page
;
1028 bi
->bi_io_vec
[0].bv_len
= STRIPE_SIZE
;
1029 bi
->bi_io_vec
[0].bv_offset
= 0;
1030 bi
->bi_iter
.bi_size
= STRIPE_SIZE
;
1032 * If this is discard request, set bi_vcnt 0. We don't
1033 * want to confuse SCSI because SCSI will replace payload
1035 if (rw
& REQ_DISCARD
)
1038 set_bit(R5_DOUBLE_LOCKED
, &sh
->dev
[i
].flags
);
1040 if (conf
->mddev
->gendisk
)
1041 trace_block_bio_remap(bdev_get_queue(bi
->bi_bdev
),
1042 bi
, disk_devt(conf
->mddev
->gendisk
),
1044 generic_make_request(bi
);
1047 if (s
->syncing
|| s
->expanding
|| s
->expanded
1049 md_sync_acct(rrdev
->bdev
, STRIPE_SECTORS
);
1051 set_bit(STRIPE_IO_STARTED
, &sh
->state
);
1054 rbi
->bi_bdev
= rrdev
->bdev
;
1056 BUG_ON(!(rw
& WRITE
));
1057 rbi
->bi_end_io
= raid5_end_write_request
;
1058 rbi
->bi_private
= sh
;
1060 pr_debug("%s: for %llu schedule op %ld on "
1061 "replacement disc %d\n",
1062 __func__
, (unsigned long long)sh
->sector
,
1064 atomic_inc(&sh
->count
);
1066 atomic_inc(&head_sh
->count
);
1067 if (use_new_offset(conf
, sh
))
1068 rbi
->bi_iter
.bi_sector
= (sh
->sector
1069 + rrdev
->new_data_offset
);
1071 rbi
->bi_iter
.bi_sector
= (sh
->sector
1072 + rrdev
->data_offset
);
1073 if (test_bit(R5_SkipCopy
, &sh
->dev
[i
].flags
))
1074 WARN_ON(test_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
));
1075 sh
->dev
[i
].rvec
.bv_page
= sh
->dev
[i
].page
;
1077 rbi
->bi_io_vec
[0].bv_len
= STRIPE_SIZE
;
1078 rbi
->bi_io_vec
[0].bv_offset
= 0;
1079 rbi
->bi_iter
.bi_size
= STRIPE_SIZE
;
1081 * If this is discard request, set bi_vcnt 0. We don't
1082 * want to confuse SCSI because SCSI will replace payload
1084 if (rw
& REQ_DISCARD
)
1086 if (conf
->mddev
->gendisk
)
1087 trace_block_bio_remap(bdev_get_queue(rbi
->bi_bdev
),
1088 rbi
, disk_devt(conf
->mddev
->gendisk
),
1090 generic_make_request(rbi
);
1092 if (!rdev
&& !rrdev
) {
1094 set_bit(STRIPE_DEGRADED
, &sh
->state
);
1095 pr_debug("skip op %ld on disc %d for sector %llu\n",
1096 bi
->bi_rw
, i
, (unsigned long long)sh
->sector
);
1097 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1098 set_bit(STRIPE_HANDLE
, &sh
->state
);
1101 if (!head_sh
->batch_head
)
1103 sh
= list_first_entry(&sh
->batch_list
, struct stripe_head
,
1110 static struct dma_async_tx_descriptor
*
1111 async_copy_data(int frombio
, struct bio
*bio
, struct page
**page
,
1112 sector_t sector
, struct dma_async_tx_descriptor
*tx
,
1113 struct stripe_head
*sh
)
1116 struct bvec_iter iter
;
1117 struct page
*bio_page
;
1119 struct async_submit_ctl submit
;
1120 enum async_tx_flags flags
= 0;
1122 if (bio
->bi_iter
.bi_sector
>= sector
)
1123 page_offset
= (signed)(bio
->bi_iter
.bi_sector
- sector
) * 512;
1125 page_offset
= (signed)(sector
- bio
->bi_iter
.bi_sector
) * -512;
1128 flags
|= ASYNC_TX_FENCE
;
1129 init_async_submit(&submit
, flags
, tx
, NULL
, NULL
, NULL
);
1131 bio_for_each_segment(bvl
, bio
, iter
) {
1132 int len
= bvl
.bv_len
;
1136 if (page_offset
< 0) {
1137 b_offset
= -page_offset
;
1138 page_offset
+= b_offset
;
1142 if (len
> 0 && page_offset
+ len
> STRIPE_SIZE
)
1143 clen
= STRIPE_SIZE
- page_offset
;
1148 b_offset
+= bvl
.bv_offset
;
1149 bio_page
= bvl
.bv_page
;
1151 if (sh
->raid_conf
->skip_copy
&&
1152 b_offset
== 0 && page_offset
== 0 &&
1153 clen
== STRIPE_SIZE
)
1156 tx
= async_memcpy(*page
, bio_page
, page_offset
,
1157 b_offset
, clen
, &submit
);
1159 tx
= async_memcpy(bio_page
, *page
, b_offset
,
1160 page_offset
, clen
, &submit
);
1162 /* chain the operations */
1163 submit
.depend_tx
= tx
;
1165 if (clen
< len
) /* hit end of page */
1173 static void ops_complete_biofill(void *stripe_head_ref
)
1175 struct stripe_head
*sh
= stripe_head_ref
;
1176 struct bio_list return_bi
= BIO_EMPTY_LIST
;
1179 pr_debug("%s: stripe %llu\n", __func__
,
1180 (unsigned long long)sh
->sector
);
1182 /* clear completed biofills */
1183 for (i
= sh
->disks
; i
--; ) {
1184 struct r5dev
*dev
= &sh
->dev
[i
];
1186 /* acknowledge completion of a biofill operation */
1187 /* and check if we need to reply to a read request,
1188 * new R5_Wantfill requests are held off until
1189 * !STRIPE_BIOFILL_RUN
1191 if (test_and_clear_bit(R5_Wantfill
, &dev
->flags
)) {
1192 struct bio
*rbi
, *rbi2
;
1197 while (rbi
&& rbi
->bi_iter
.bi_sector
<
1198 dev
->sector
+ STRIPE_SECTORS
) {
1199 rbi2
= r5_next_bio(rbi
, dev
->sector
);
1200 if (!raid5_dec_bi_active_stripes(rbi
))
1201 bio_list_add(&return_bi
, rbi
);
1206 clear_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
1208 return_io(&return_bi
);
1210 set_bit(STRIPE_HANDLE
, &sh
->state
);
1214 static void ops_run_biofill(struct stripe_head
*sh
)
1216 struct dma_async_tx_descriptor
*tx
= NULL
;
1217 struct async_submit_ctl submit
;
1220 BUG_ON(sh
->batch_head
);
1221 pr_debug("%s: stripe %llu\n", __func__
,
1222 (unsigned long long)sh
->sector
);
1224 for (i
= sh
->disks
; i
--; ) {
1225 struct r5dev
*dev
= &sh
->dev
[i
];
1226 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
1228 spin_lock_irq(&sh
->stripe_lock
);
1229 dev
->read
= rbi
= dev
->toread
;
1231 spin_unlock_irq(&sh
->stripe_lock
);
1232 while (rbi
&& rbi
->bi_iter
.bi_sector
<
1233 dev
->sector
+ STRIPE_SECTORS
) {
1234 tx
= async_copy_data(0, rbi
, &dev
->page
,
1235 dev
->sector
, tx
, sh
);
1236 rbi
= r5_next_bio(rbi
, dev
->sector
);
1241 atomic_inc(&sh
->count
);
1242 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_biofill
, sh
, NULL
);
1243 async_trigger_callback(&submit
);
1246 static void mark_target_uptodate(struct stripe_head
*sh
, int target
)
1253 tgt
= &sh
->dev
[target
];
1254 set_bit(R5_UPTODATE
, &tgt
->flags
);
1255 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
1256 clear_bit(R5_Wantcompute
, &tgt
->flags
);
1259 static void ops_complete_compute(void *stripe_head_ref
)
1261 struct stripe_head
*sh
= stripe_head_ref
;
1263 pr_debug("%s: stripe %llu\n", __func__
,
1264 (unsigned long long)sh
->sector
);
1266 /* mark the computed target(s) as uptodate */
1267 mark_target_uptodate(sh
, sh
->ops
.target
);
1268 mark_target_uptodate(sh
, sh
->ops
.target2
);
1270 clear_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
1271 if (sh
->check_state
== check_state_compute_run
)
1272 sh
->check_state
= check_state_compute_result
;
1273 set_bit(STRIPE_HANDLE
, &sh
->state
);
1277 /* return a pointer to the address conversion region of the scribble buffer */
1278 static addr_conv_t
*to_addr_conv(struct stripe_head
*sh
,
1279 struct raid5_percpu
*percpu
, int i
)
1283 addr
= flex_array_get(percpu
->scribble
, i
);
1284 return addr
+ sizeof(struct page
*) * (sh
->disks
+ 2);
1287 /* return a pointer to the address conversion region of the scribble buffer */
1288 static struct page
**to_addr_page(struct raid5_percpu
*percpu
, int i
)
1292 addr
= flex_array_get(percpu
->scribble
, i
);
1296 static struct dma_async_tx_descriptor
*
1297 ops_run_compute5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1299 int disks
= sh
->disks
;
1300 struct page
**xor_srcs
= to_addr_page(percpu
, 0);
1301 int target
= sh
->ops
.target
;
1302 struct r5dev
*tgt
= &sh
->dev
[target
];
1303 struct page
*xor_dest
= tgt
->page
;
1305 struct dma_async_tx_descriptor
*tx
;
1306 struct async_submit_ctl submit
;
1309 BUG_ON(sh
->batch_head
);
1311 pr_debug("%s: stripe %llu block: %d\n",
1312 __func__
, (unsigned long long)sh
->sector
, target
);
1313 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
1315 for (i
= disks
; i
--; )
1317 xor_srcs
[count
++] = sh
->dev
[i
].page
;
1319 atomic_inc(&sh
->count
);
1321 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
, NULL
,
1322 ops_complete_compute
, sh
, to_addr_conv(sh
, percpu
, 0));
1323 if (unlikely(count
== 1))
1324 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
1326 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
1331 /* set_syndrome_sources - populate source buffers for gen_syndrome
1332 * @srcs - (struct page *) array of size sh->disks
1333 * @sh - stripe_head to parse
1335 * Populates srcs in proper layout order for the stripe and returns the
1336 * 'count' of sources to be used in a call to async_gen_syndrome. The P
1337 * destination buffer is recorded in srcs[count] and the Q destination
1338 * is recorded in srcs[count+1]].
1340 static int set_syndrome_sources(struct page
**srcs
,
1341 struct stripe_head
*sh
,
1344 int disks
= sh
->disks
;
1345 int syndrome_disks
= sh
->ddf_layout
? disks
: (disks
- 2);
1346 int d0_idx
= raid6_d0(sh
);
1350 for (i
= 0; i
< disks
; i
++)
1356 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
1357 struct r5dev
*dev
= &sh
->dev
[i
];
1359 if (i
== sh
->qd_idx
|| i
== sh
->pd_idx
||
1360 (srctype
== SYNDROME_SRC_ALL
) ||
1361 (srctype
== SYNDROME_SRC_WANT_DRAIN
&&
1362 test_bit(R5_Wantdrain
, &dev
->flags
)) ||
1363 (srctype
== SYNDROME_SRC_WRITTEN
&&
1365 srcs
[slot
] = sh
->dev
[i
].page
;
1366 i
= raid6_next_disk(i
, disks
);
1367 } while (i
!= d0_idx
);
1369 return syndrome_disks
;
1372 static struct dma_async_tx_descriptor
*
1373 ops_run_compute6_1(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1375 int disks
= sh
->disks
;
1376 struct page
**blocks
= to_addr_page(percpu
, 0);
1378 int qd_idx
= sh
->qd_idx
;
1379 struct dma_async_tx_descriptor
*tx
;
1380 struct async_submit_ctl submit
;
1386 BUG_ON(sh
->batch_head
);
1387 if (sh
->ops
.target
< 0)
1388 target
= sh
->ops
.target2
;
1389 else if (sh
->ops
.target2
< 0)
1390 target
= sh
->ops
.target
;
1392 /* we should only have one valid target */
1395 pr_debug("%s: stripe %llu block: %d\n",
1396 __func__
, (unsigned long long)sh
->sector
, target
);
1398 tgt
= &sh
->dev
[target
];
1399 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
1402 atomic_inc(&sh
->count
);
1404 if (target
== qd_idx
) {
1405 count
= set_syndrome_sources(blocks
, sh
, SYNDROME_SRC_ALL
);
1406 blocks
[count
] = NULL
; /* regenerating p is not necessary */
1407 BUG_ON(blocks
[count
+1] != dest
); /* q should already be set */
1408 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
1409 ops_complete_compute
, sh
,
1410 to_addr_conv(sh
, percpu
, 0));
1411 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1413 /* Compute any data- or p-drive using XOR */
1415 for (i
= disks
; i
-- ; ) {
1416 if (i
== target
|| i
== qd_idx
)
1418 blocks
[count
++] = sh
->dev
[i
].page
;
1421 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
1422 NULL
, ops_complete_compute
, sh
,
1423 to_addr_conv(sh
, percpu
, 0));
1424 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
, &submit
);
1430 static struct dma_async_tx_descriptor
*
1431 ops_run_compute6_2(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1433 int i
, count
, disks
= sh
->disks
;
1434 int syndrome_disks
= sh
->ddf_layout
? disks
: disks
-2;
1435 int d0_idx
= raid6_d0(sh
);
1436 int faila
= -1, failb
= -1;
1437 int target
= sh
->ops
.target
;
1438 int target2
= sh
->ops
.target2
;
1439 struct r5dev
*tgt
= &sh
->dev
[target
];
1440 struct r5dev
*tgt2
= &sh
->dev
[target2
];
1441 struct dma_async_tx_descriptor
*tx
;
1442 struct page
**blocks
= to_addr_page(percpu
, 0);
1443 struct async_submit_ctl submit
;
1445 BUG_ON(sh
->batch_head
);
1446 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1447 __func__
, (unsigned long long)sh
->sector
, target
, target2
);
1448 BUG_ON(target
< 0 || target2
< 0);
1449 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
1450 BUG_ON(!test_bit(R5_Wantcompute
, &tgt2
->flags
));
1452 /* we need to open-code set_syndrome_sources to handle the
1453 * slot number conversion for 'faila' and 'failb'
1455 for (i
= 0; i
< disks
; i
++)
1460 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
1462 blocks
[slot
] = sh
->dev
[i
].page
;
1468 i
= raid6_next_disk(i
, disks
);
1469 } while (i
!= d0_idx
);
1471 BUG_ON(faila
== failb
);
1474 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1475 __func__
, (unsigned long long)sh
->sector
, faila
, failb
);
1477 atomic_inc(&sh
->count
);
1479 if (failb
== syndrome_disks
+1) {
1480 /* Q disk is one of the missing disks */
1481 if (faila
== syndrome_disks
) {
1482 /* Missing P+Q, just recompute */
1483 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
1484 ops_complete_compute
, sh
,
1485 to_addr_conv(sh
, percpu
, 0));
1486 return async_gen_syndrome(blocks
, 0, syndrome_disks
+2,
1487 STRIPE_SIZE
, &submit
);
1491 int qd_idx
= sh
->qd_idx
;
1493 /* Missing D+Q: recompute D from P, then recompute Q */
1494 if (target
== qd_idx
)
1495 data_target
= target2
;
1497 data_target
= target
;
1500 for (i
= disks
; i
-- ; ) {
1501 if (i
== data_target
|| i
== qd_idx
)
1503 blocks
[count
++] = sh
->dev
[i
].page
;
1505 dest
= sh
->dev
[data_target
].page
;
1506 init_async_submit(&submit
,
1507 ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
1509 to_addr_conv(sh
, percpu
, 0));
1510 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
,
1513 count
= set_syndrome_sources(blocks
, sh
, SYNDROME_SRC_ALL
);
1514 init_async_submit(&submit
, ASYNC_TX_FENCE
, tx
,
1515 ops_complete_compute
, sh
,
1516 to_addr_conv(sh
, percpu
, 0));
1517 return async_gen_syndrome(blocks
, 0, count
+2,
1518 STRIPE_SIZE
, &submit
);
1521 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
1522 ops_complete_compute
, sh
,
1523 to_addr_conv(sh
, percpu
, 0));
1524 if (failb
== syndrome_disks
) {
1525 /* We're missing D+P. */
1526 return async_raid6_datap_recov(syndrome_disks
+2,
1530 /* We're missing D+D. */
1531 return async_raid6_2data_recov(syndrome_disks
+2,
1532 STRIPE_SIZE
, faila
, failb
,
1538 static void ops_complete_prexor(void *stripe_head_ref
)
1540 struct stripe_head
*sh
= stripe_head_ref
;
1542 pr_debug("%s: stripe %llu\n", __func__
,
1543 (unsigned long long)sh
->sector
);
1546 static struct dma_async_tx_descriptor
*
1547 ops_run_prexor5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1548 struct dma_async_tx_descriptor
*tx
)
1550 int disks
= sh
->disks
;
1551 struct page
**xor_srcs
= to_addr_page(percpu
, 0);
1552 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
1553 struct async_submit_ctl submit
;
1555 /* existing parity data subtracted */
1556 struct page
*xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
1558 BUG_ON(sh
->batch_head
);
1559 pr_debug("%s: stripe %llu\n", __func__
,
1560 (unsigned long long)sh
->sector
);
1562 for (i
= disks
; i
--; ) {
1563 struct r5dev
*dev
= &sh
->dev
[i
];
1564 /* Only process blocks that are known to be uptodate */
1565 if (test_bit(R5_Wantdrain
, &dev
->flags
))
1566 xor_srcs
[count
++] = dev
->page
;
1569 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_DROP_DST
, tx
,
1570 ops_complete_prexor
, sh
, to_addr_conv(sh
, percpu
, 0));
1571 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
1576 static struct dma_async_tx_descriptor
*
1577 ops_run_prexor6(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1578 struct dma_async_tx_descriptor
*tx
)
1580 struct page
**blocks
= to_addr_page(percpu
, 0);
1582 struct async_submit_ctl submit
;
1584 pr_debug("%s: stripe %llu\n", __func__
,
1585 (unsigned long long)sh
->sector
);
1587 count
= set_syndrome_sources(blocks
, sh
, SYNDROME_SRC_WANT_DRAIN
);
1589 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_PQ_XOR_DST
, tx
,
1590 ops_complete_prexor
, sh
, to_addr_conv(sh
, percpu
, 0));
1591 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1596 static struct dma_async_tx_descriptor
*
1597 ops_run_biodrain(struct stripe_head
*sh
, struct dma_async_tx_descriptor
*tx
)
1599 int disks
= sh
->disks
;
1601 struct stripe_head
*head_sh
= sh
;
1603 pr_debug("%s: stripe %llu\n", __func__
,
1604 (unsigned long long)sh
->sector
);
1606 for (i
= disks
; i
--; ) {
1611 if (test_and_clear_bit(R5_Wantdrain
, &head_sh
->dev
[i
].flags
)) {
1616 spin_lock_irq(&sh
->stripe_lock
);
1617 chosen
= dev
->towrite
;
1618 dev
->towrite
= NULL
;
1619 sh
->overwrite_disks
= 0;
1620 BUG_ON(dev
->written
);
1621 wbi
= dev
->written
= chosen
;
1622 spin_unlock_irq(&sh
->stripe_lock
);
1623 WARN_ON(dev
->page
!= dev
->orig_page
);
1625 while (wbi
&& wbi
->bi_iter
.bi_sector
<
1626 dev
->sector
+ STRIPE_SECTORS
) {
1627 if (wbi
->bi_rw
& REQ_FUA
)
1628 set_bit(R5_WantFUA
, &dev
->flags
);
1629 if (wbi
->bi_rw
& REQ_SYNC
)
1630 set_bit(R5_SyncIO
, &dev
->flags
);
1631 if (wbi
->bi_rw
& REQ_DISCARD
)
1632 set_bit(R5_Discard
, &dev
->flags
);
1634 tx
= async_copy_data(1, wbi
, &dev
->page
,
1635 dev
->sector
, tx
, sh
);
1636 if (dev
->page
!= dev
->orig_page
) {
1637 set_bit(R5_SkipCopy
, &dev
->flags
);
1638 clear_bit(R5_UPTODATE
, &dev
->flags
);
1639 clear_bit(R5_OVERWRITE
, &dev
->flags
);
1642 wbi
= r5_next_bio(wbi
, dev
->sector
);
1645 if (head_sh
->batch_head
) {
1646 sh
= list_first_entry(&sh
->batch_list
,
1659 static void ops_complete_reconstruct(void *stripe_head_ref
)
1661 struct stripe_head
*sh
= stripe_head_ref
;
1662 int disks
= sh
->disks
;
1663 int pd_idx
= sh
->pd_idx
;
1664 int qd_idx
= sh
->qd_idx
;
1666 bool fua
= false, sync
= false, discard
= false;
1668 pr_debug("%s: stripe %llu\n", __func__
,
1669 (unsigned long long)sh
->sector
);
1671 for (i
= disks
; i
--; ) {
1672 fua
|= test_bit(R5_WantFUA
, &sh
->dev
[i
].flags
);
1673 sync
|= test_bit(R5_SyncIO
, &sh
->dev
[i
].flags
);
1674 discard
|= test_bit(R5_Discard
, &sh
->dev
[i
].flags
);
1677 for (i
= disks
; i
--; ) {
1678 struct r5dev
*dev
= &sh
->dev
[i
];
1680 if (dev
->written
|| i
== pd_idx
|| i
== qd_idx
) {
1681 if (!discard
&& !test_bit(R5_SkipCopy
, &dev
->flags
))
1682 set_bit(R5_UPTODATE
, &dev
->flags
);
1684 set_bit(R5_WantFUA
, &dev
->flags
);
1686 set_bit(R5_SyncIO
, &dev
->flags
);
1690 if (sh
->reconstruct_state
== reconstruct_state_drain_run
)
1691 sh
->reconstruct_state
= reconstruct_state_drain_result
;
1692 else if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
)
1693 sh
->reconstruct_state
= reconstruct_state_prexor_drain_result
;
1695 BUG_ON(sh
->reconstruct_state
!= reconstruct_state_run
);
1696 sh
->reconstruct_state
= reconstruct_state_result
;
1699 set_bit(STRIPE_HANDLE
, &sh
->state
);
1704 ops_run_reconstruct5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1705 struct dma_async_tx_descriptor
*tx
)
1707 int disks
= sh
->disks
;
1708 struct page
**xor_srcs
;
1709 struct async_submit_ctl submit
;
1710 int count
, pd_idx
= sh
->pd_idx
, i
;
1711 struct page
*xor_dest
;
1713 unsigned long flags
;
1715 struct stripe_head
*head_sh
= sh
;
1718 pr_debug("%s: stripe %llu\n", __func__
,
1719 (unsigned long long)sh
->sector
);
1721 for (i
= 0; i
< sh
->disks
; i
++) {
1724 if (!test_bit(R5_Discard
, &sh
->dev
[i
].flags
))
1727 if (i
>= sh
->disks
) {
1728 atomic_inc(&sh
->count
);
1729 set_bit(R5_Discard
, &sh
->dev
[pd_idx
].flags
);
1730 ops_complete_reconstruct(sh
);
1735 xor_srcs
= to_addr_page(percpu
, j
);
1736 /* check if prexor is active which means only process blocks
1737 * that are part of a read-modify-write (written)
1739 if (head_sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
) {
1741 xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
1742 for (i
= disks
; i
--; ) {
1743 struct r5dev
*dev
= &sh
->dev
[i
];
1744 if (head_sh
->dev
[i
].written
)
1745 xor_srcs
[count
++] = dev
->page
;
1748 xor_dest
= sh
->dev
[pd_idx
].page
;
1749 for (i
= disks
; i
--; ) {
1750 struct r5dev
*dev
= &sh
->dev
[i
];
1752 xor_srcs
[count
++] = dev
->page
;
1756 /* 1/ if we prexor'd then the dest is reused as a source
1757 * 2/ if we did not prexor then we are redoing the parity
1758 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1759 * for the synchronous xor case
1761 last_stripe
= !head_sh
->batch_head
||
1762 list_first_entry(&sh
->batch_list
,
1763 struct stripe_head
, batch_list
) == head_sh
;
1765 flags
= ASYNC_TX_ACK
|
1766 (prexor
? ASYNC_TX_XOR_DROP_DST
: ASYNC_TX_XOR_ZERO_DST
);
1768 atomic_inc(&head_sh
->count
);
1769 init_async_submit(&submit
, flags
, tx
, ops_complete_reconstruct
, head_sh
,
1770 to_addr_conv(sh
, percpu
, j
));
1772 flags
= prexor
? ASYNC_TX_XOR_DROP_DST
: ASYNC_TX_XOR_ZERO_DST
;
1773 init_async_submit(&submit
, flags
, tx
, NULL
, NULL
,
1774 to_addr_conv(sh
, percpu
, j
));
1777 if (unlikely(count
== 1))
1778 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
1780 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
1783 sh
= list_first_entry(&sh
->batch_list
, struct stripe_head
,
1790 ops_run_reconstruct6(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1791 struct dma_async_tx_descriptor
*tx
)
1793 struct async_submit_ctl submit
;
1794 struct page
**blocks
;
1795 int count
, i
, j
= 0;
1796 struct stripe_head
*head_sh
= sh
;
1799 unsigned long txflags
;
1801 pr_debug("%s: stripe %llu\n", __func__
, (unsigned long long)sh
->sector
);
1803 for (i
= 0; i
< sh
->disks
; i
++) {
1804 if (sh
->pd_idx
== i
|| sh
->qd_idx
== i
)
1806 if (!test_bit(R5_Discard
, &sh
->dev
[i
].flags
))
1809 if (i
>= sh
->disks
) {
1810 atomic_inc(&sh
->count
);
1811 set_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
);
1812 set_bit(R5_Discard
, &sh
->dev
[sh
->qd_idx
].flags
);
1813 ops_complete_reconstruct(sh
);
1818 blocks
= to_addr_page(percpu
, j
);
1820 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
) {
1821 synflags
= SYNDROME_SRC_WRITTEN
;
1822 txflags
= ASYNC_TX_ACK
| ASYNC_TX_PQ_XOR_DST
;
1824 synflags
= SYNDROME_SRC_ALL
;
1825 txflags
= ASYNC_TX_ACK
;
1828 count
= set_syndrome_sources(blocks
, sh
, synflags
);
1829 last_stripe
= !head_sh
->batch_head
||
1830 list_first_entry(&sh
->batch_list
,
1831 struct stripe_head
, batch_list
) == head_sh
;
1834 atomic_inc(&head_sh
->count
);
1835 init_async_submit(&submit
, txflags
, tx
, ops_complete_reconstruct
,
1836 head_sh
, to_addr_conv(sh
, percpu
, j
));
1838 init_async_submit(&submit
, 0, tx
, NULL
, NULL
,
1839 to_addr_conv(sh
, percpu
, j
));
1840 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1843 sh
= list_first_entry(&sh
->batch_list
, struct stripe_head
,
1849 static void ops_complete_check(void *stripe_head_ref
)
1851 struct stripe_head
*sh
= stripe_head_ref
;
1853 pr_debug("%s: stripe %llu\n", __func__
,
1854 (unsigned long long)sh
->sector
);
1856 sh
->check_state
= check_state_check_result
;
1857 set_bit(STRIPE_HANDLE
, &sh
->state
);
1861 static void ops_run_check_p(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1863 int disks
= sh
->disks
;
1864 int pd_idx
= sh
->pd_idx
;
1865 int qd_idx
= sh
->qd_idx
;
1866 struct page
*xor_dest
;
1867 struct page
**xor_srcs
= to_addr_page(percpu
, 0);
1868 struct dma_async_tx_descriptor
*tx
;
1869 struct async_submit_ctl submit
;
1873 pr_debug("%s: stripe %llu\n", __func__
,
1874 (unsigned long long)sh
->sector
);
1876 BUG_ON(sh
->batch_head
);
1878 xor_dest
= sh
->dev
[pd_idx
].page
;
1879 xor_srcs
[count
++] = xor_dest
;
1880 for (i
= disks
; i
--; ) {
1881 if (i
== pd_idx
|| i
== qd_idx
)
1883 xor_srcs
[count
++] = sh
->dev
[i
].page
;
1886 init_async_submit(&submit
, 0, NULL
, NULL
, NULL
,
1887 to_addr_conv(sh
, percpu
, 0));
1888 tx
= async_xor_val(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
,
1889 &sh
->ops
.zero_sum_result
, &submit
);
1891 atomic_inc(&sh
->count
);
1892 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_check
, sh
, NULL
);
1893 tx
= async_trigger_callback(&submit
);
1896 static void ops_run_check_pq(struct stripe_head
*sh
, struct raid5_percpu
*percpu
, int checkp
)
1898 struct page
**srcs
= to_addr_page(percpu
, 0);
1899 struct async_submit_ctl submit
;
1902 pr_debug("%s: stripe %llu checkp: %d\n", __func__
,
1903 (unsigned long long)sh
->sector
, checkp
);
1905 BUG_ON(sh
->batch_head
);
1906 count
= set_syndrome_sources(srcs
, sh
, SYNDROME_SRC_ALL
);
1910 atomic_inc(&sh
->count
);
1911 init_async_submit(&submit
, ASYNC_TX_ACK
, NULL
, ops_complete_check
,
1912 sh
, to_addr_conv(sh
, percpu
, 0));
1913 async_syndrome_val(srcs
, 0, count
+2, STRIPE_SIZE
,
1914 &sh
->ops
.zero_sum_result
, percpu
->spare_page
, &submit
);
1917 static void raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1919 int overlap_clear
= 0, i
, disks
= sh
->disks
;
1920 struct dma_async_tx_descriptor
*tx
= NULL
;
1921 struct r5conf
*conf
= sh
->raid_conf
;
1922 int level
= conf
->level
;
1923 struct raid5_percpu
*percpu
;
1927 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1928 if (test_bit(STRIPE_OP_BIOFILL
, &ops_request
)) {
1929 ops_run_biofill(sh
);
1933 if (test_bit(STRIPE_OP_COMPUTE_BLK
, &ops_request
)) {
1935 tx
= ops_run_compute5(sh
, percpu
);
1937 if (sh
->ops
.target2
< 0 || sh
->ops
.target
< 0)
1938 tx
= ops_run_compute6_1(sh
, percpu
);
1940 tx
= ops_run_compute6_2(sh
, percpu
);
1942 /* terminate the chain if reconstruct is not set to be run */
1943 if (tx
&& !test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
))
1947 if (test_bit(STRIPE_OP_PREXOR
, &ops_request
)) {
1949 tx
= ops_run_prexor5(sh
, percpu
, tx
);
1951 tx
= ops_run_prexor6(sh
, percpu
, tx
);
1954 if (test_bit(STRIPE_OP_BIODRAIN
, &ops_request
)) {
1955 tx
= ops_run_biodrain(sh
, tx
);
1959 if (test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
)) {
1961 ops_run_reconstruct5(sh
, percpu
, tx
);
1963 ops_run_reconstruct6(sh
, percpu
, tx
);
1966 if (test_bit(STRIPE_OP_CHECK
, &ops_request
)) {
1967 if (sh
->check_state
== check_state_run
)
1968 ops_run_check_p(sh
, percpu
);
1969 else if (sh
->check_state
== check_state_run_q
)
1970 ops_run_check_pq(sh
, percpu
, 0);
1971 else if (sh
->check_state
== check_state_run_pq
)
1972 ops_run_check_pq(sh
, percpu
, 1);
1977 if (overlap_clear
&& !sh
->batch_head
)
1978 for (i
= disks
; i
--; ) {
1979 struct r5dev
*dev
= &sh
->dev
[i
];
1980 if (test_and_clear_bit(R5_Overlap
, &dev
->flags
))
1981 wake_up(&sh
->raid_conf
->wait_for_overlap
);
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
{
	struct stripe_head *sh;

	sh = kmem_cache_zalloc(sc, gfp);
	if (sh) {
		spin_lock_init(&sh->stripe_lock);
		spin_lock_init(&sh->batch_lock);
		INIT_LIST_HEAD(&sh->batch_list);
		INIT_LIST_HEAD(&sh->lru);
		atomic_set(&sh->count, 1);
	}
	return sh;
}
static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
{
	struct stripe_head *sh;

	sh = alloc_stripe(conf->slab_cache, gfp);
	if (!sh)
		return 0;

	sh->raid_conf = conf;

	if (grow_buffers(sh, gfp)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->hash_lock_index =
		conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
	/* we just created an active stripe so... */
	atomic_inc(&conf->active_stripes);

	release_stripe(sh);
	conf->max_nr_stripes++;
	return 1;
}
2025 static int grow_stripes(struct r5conf
*conf
, int num
)
2027 struct kmem_cache
*sc
;
2028 int devs
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
2030 if (conf
->mddev
->gendisk
)
2031 sprintf(conf
->cache_name
[0],
2032 "raid%d-%s", conf
->level
, mdname(conf
->mddev
));
2034 sprintf(conf
->cache_name
[0],
2035 "raid%d-%p", conf
->level
, conf
->mddev
);
2036 sprintf(conf
->cache_name
[1], "%s-alt", conf
->cache_name
[0]);
2038 conf
->active_name
= 0;
2039 sc
= kmem_cache_create(conf
->cache_name
[conf
->active_name
],
2040 sizeof(struct stripe_head
)+(devs
-1)*sizeof(struct r5dev
),
2044 conf
->slab_cache
= sc
;
2045 conf
->pool_size
= devs
;
2047 if (!grow_one_stripe(conf
, GFP_KERNEL
))
/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
{
	struct flex_array *ret;
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
	ret = flex_array_alloc(len, cnt, flags);
	if (!ret)
		return NULL;
	/* always prealloc all elements, so no locking is required */
	if (flex_array_prealloc(ret, 0, cnt, flags)) {
		flex_array_free(ret);
		return NULL;
	}
	return ret;
}
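
/*
 * Rough size example (added for clarity, assuming 8-byte pointers and an
 * 8-byte addr_conv_t): for a 6-device array each scribble element needs
 * (6 + 2) * 8 bytes of page pointers plus (6 + 2) * 8 bytes of address
 * conversion space, i.e. roughly 128 bytes per element.
 */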
2083 static int resize_chunks(struct r5conf
*conf
, int new_disks
, int new_sectors
)
2088 mddev_suspend(conf
->mddev
);
2090 for_each_present_cpu(cpu
) {
2091 struct raid5_percpu
*percpu
;
2092 struct flex_array
*scribble
;
2094 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
2095 scribble
= scribble_alloc(new_disks
,
2096 new_sectors
/ STRIPE_SECTORS
,
2100 flex_array_free(percpu
->scribble
);
2101 percpu
->scribble
= scribble
;
2108 mddev_resume(conf
->mddev
);
static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	struct kmem_cache *sc;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			0, 0, NULL);

	/* Need to ensure auto-resizing doesn't interfere */
	mutex_lock(&conf->cache_size_mutex);

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = alloc_stripe(sc, GFP_KERNEL);
		if (!nsh)
			break;
		nsh->raid_conf = conf;
		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		mutex_unlock(&conf->cache_size_mutex);
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	hash = 0;
	cnt = 0;
	list_for_each_entry(nsh, &newstripes, lru) {
		lock_device_hash_lock(conf, hash);
		wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
					 !list_empty(conf->inactive_list + hash),
					 unlock_device_hash_lock(conf, hash),
					 lock_device_hash_lock(conf, hash));
		osh = get_free_stripe(conf, hash);
		unlock_device_hash_lock(conf, hash);

		for (i = 0; i < conf->pool_size; i++) {
			nsh->dev[i].page = osh->dev[i].page;
			nsh->dev[i].orig_page = osh->dev[i].page;
		}
		nsh->hash_lock_index = hash;
		kmem_cache_free(conf->slab_cache, osh);
		cnt++;
		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
			hash++;
			cnt = 0;
		}
	}
	kmem_cache_destroy(conf->slab_cache);

	/*
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	for (i = 0; i < conf->raid_disks; i++)
		ndisks[i] = conf->disks[i];
	conf->disks = ndisks;

	mutex_unlock(&conf->cache_size_mutex);

	/* Step 4, return new stripes to service */
	while (!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i = conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				nsh->dev[i].orig_page = p;
			}
		release_stripe(nsh);
	}
	/* critical section pass, GFP_NOIO no longer needed */

	conf->slab_cache = sc;
	conf->active_name = 1-conf->active_name;
	conf->pool_size = newsize;
	return err;
}
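/*
 * Illustrative sizing note, not in the original source: every slab object
 * embeds one struct r5dev per member device, so growing a pool_size of 4
 * to newsize == 6 makes each new stripe_head bigger by
 * 2 * sizeof(struct r5dev), and only the two new slots per stripe need
 * fresh pages in step 4 - the existing pages are simply re-pointed in
 * step 2.
 */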
static int drop_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;
	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;

	spin_lock_irq(conf->hash_locks + hash);
	sh = get_free_stripe(conf, hash);
	spin_unlock_irq(conf->hash_locks + hash);
	if (!sh)
		return 0;
	BUG_ON(atomic_read(&sh->count));
	kmem_cache_free(conf->slab_cache, sh);
	atomic_dec(&conf->active_stripes);
	conf->max_nr_stripes--;
	return 1;
}

static void shrink_stripes(struct r5conf *conf)
{
	while (conf->max_nr_stripes &&
	       drop_one_stripe(conf))
		;

	kmem_cache_destroy(conf->slab_cache);
	conf->slab_cache = NULL;
}
static void raid5_end_read_request(struct bio *bi)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev = NULL;
	sector_t s;

	for (i = 0; i < disks; i++)
		if (bi == &sh->dev[i].req)
			break;

	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		bi->bi_error);

	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
		/* If replacement finished while this request was outstanding,
		 * 'replacement' might be NULL already.
		 * In that case it moved down to 'rdev'.
		 * rdev is not removed until all requests are finished.
		 */
		rdev = conf->disks[i].replacement;
	else
		rdev = conf->disks[i].rdev;

	if (use_new_offset(conf, sh))
		s = sh->sector + rdev->new_data_offset;
	else
		s = sh->sector + rdev->data_offset;

	if (!bi->bi_error) {
		set_bit(R5_UPTODATE, &sh->dev[i].flags);
		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
			/* Note that this cannot happen on a
			 * replacement device.  We just fail those on
			 * any error.
			 */
			printk_ratelimited(KERN_INFO
				"md/raid:%s: read error corrected"
				" (%lu sectors at %llu on %s)\n",
				mdname(conf->mddev), STRIPE_SECTORS,
				(unsigned long long)s,
				bdevname(rdev->bdev, b));
			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);

		if (atomic_read(&rdev->read_errors))
			atomic_set(&rdev->read_errors, 0);
	} else {
		const char *bdn = bdevname(rdev->bdev, b);
		int retry = 0;
		int set_bad = 0;

		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
		atomic_inc(&rdev->read_errors);
		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
			printk_ratelimited(KERN_WARNING
				"md/raid:%s: read error on replacement device "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		else if (conf->mddev->degraded >= conf->max_degraded) {
			set_bad = 1;
			printk_ratelimited(KERN_WARNING
				"md/raid:%s: read error not correctable "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
			set_bad = 1;
			printk_ratelimited(KERN_WARNING
				"md/raid:%s: read error NOT corrected!! "
				"(sector %llu on %s).\n",
				mdname(conf->mddev),
				(unsigned long long)s,
				bdn);
		} else if (atomic_read(&rdev->read_errors)
			   > conf->max_nr_stripes)
			printk(KERN_WARNING
			       "md/raid:%s: Too many read errors, failing device %s.\n",
			       mdname(conf->mddev), bdn);
		else
			retry = 1;
		if (set_bad && test_bit(In_sync, &rdev->flags)
		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
			retry = 1;
		if (retry)
			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
				set_bit(R5_ReadError, &sh->dev[i].flags);
				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
			} else
				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
		else {
			clear_bit(R5_ReadError, &sh->dev[i].flags);
			clear_bit(R5_ReWrite, &sh->dev[i].flags);
			if (!(set_bad
			      && test_bit(In_sync, &rdev->flags)
			      && rdev_set_badblocks(
					rdev, sh->sector, STRIPE_SECTORS, 0)))
				md_error(conf->mddev, rdev);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);
	clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);
static void raid5_end_write_request(struct bio *bi)
{
	struct stripe_head *sh = bi->bi_private;
	struct r5conf *conf = sh->raid_conf;
	int disks = sh->disks, i;
	struct md_rdev *uninitialized_var(rdev);
	sector_t first_bad;
	int bad_sectors;
	int replacement = 0;

	for (i = 0 ; i < disks; i++) {
		if (bi == &sh->dev[i].req) {
			rdev = conf->disks[i].rdev;
			break;
		}
		if (bi == &sh->dev[i].rreq) {
			rdev = conf->disks[i].replacement;
			if (rdev)
				replacement = 1;
			else
				/* rdev was removed and 'replacement'
				 * replaced it.  rdev is not removed
				 * until all requests are finished.
				 */
				rdev = conf->disks[i].rdev;
			break;
		}
	}
	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
		bi->bi_error);

	if (replacement) {
		if (bi->bi_error)
			md_error(conf->mddev, rdev);
		else if (is_badblock(rdev, sh->sector,
				     STRIPE_SECTORS,
				     &first_bad, &bad_sectors))
			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
	} else {
		if (bi->bi_error) {
			set_bit(STRIPE_DEGRADED, &sh->state);
			set_bit(WriteErrorSeen, &rdev->flags);
			set_bit(R5_WriteError, &sh->dev[i].flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
		} else if (is_badblock(rdev, sh->sector,
				       STRIPE_SECTORS,
				       &first_bad, &bad_sectors)) {
			set_bit(R5_MadeGood, &sh->dev[i].flags);
			if (test_bit(R5_ReadError, &sh->dev[i].flags))
				/* That was a successful write so make
				 * sure it looks like we already did
				 * a re-write.
				 */
				set_bit(R5_ReWrite, &sh->dev[i].flags);
		}
	}
	rdev_dec_pending(rdev, conf->mddev);

	if (sh->batch_head && bi->bi_error && !replacement)
		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);

	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
		clear_bit(R5_LOCKED, &sh->dev[i].flags);
	set_bit(STRIPE_HANDLE, &sh->state);

	if (sh->batch_head && sh != sh->batch_head)
		release_stripe(sh->batch_head);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);

static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
	struct r5dev *dev = &sh->dev[i];

	bio_init(&dev->req);
	dev->req.bi_io_vec = &dev->vec;
	dev->req.bi_max_vecs = 1;
	dev->req.bi_private = sh;

	bio_init(&dev->rreq);
	dev->rreq.bi_io_vec = &dev->rvec;
	dev->rreq.bi_max_vecs = 1;
	dev->rreq.bi_private = sh;

	dev->sector = compute_blocknr(sh, i, previous);
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r5conf *conf = mddev->private;
	unsigned long flags;
	pr_debug("raid456: error called\n");

	spin_lock_irqsave(&conf->device_lock, flags);
	clear_bit(In_sync, &rdev->flags);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);

	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid:%s: Disk failure on %s, disabling device.\n"
	       "md/raid:%s: Operation continuing on %d devices.\n",
	       mdname(mddev),
	       bdevname(rdev->bdev, b),
	       mdname(mddev),
	       conf->raid_disks - mddev->degraded);
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh)
{
	sector_t stripe, stripe2;
	sector_t chunk_number;
	unsigned int chunk_offset;
	int pd_idx, qd_idx;
	int ddf_layout = 0;
	sector_t new_sector;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int raid_disks = previous ? conf->previous_raid_disks
				  : conf->raid_disks;
	int data_disks = raid_disks - conf->max_degraded;

	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number;
	*dd_idx = sector_div(stripe, data_disks);
	stripe2 = stripe;
	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
	pd_idx = qd_idx = -1;
	switch(conf->level) {
	case 4:
		pd_idx = data_disks;
		break;
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = data_disks - sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			(*dd_idx)++;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			break;
		}
		break;
	case 6:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + 1) % raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;
		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}
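/*
 * Worked example, not in the original source: take a 4-drive RAID5 with
 * 64-sector chunks and ALGORITHM_LEFT_SYMMETRIC, and r_sector = 200.
 * Then chunk_offset = 200 % 64 = 8, chunk_number = 3,
 * *dd_idx = 3 % 3 = 0 (data_disks = 3) and stripe = 1.  For stripe 1 the
 * parity disk is pd_idx = 3 - (1 % 4) = 2, the data block rotates to
 * disk (2 + 1 + 0) % 4 = 3, and new_sector = 1 * 64 + 8 = 72.
 */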
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ? conf->prev_algo
				 : conf->algorithm;
	sector_t stripe;
	int chunk_offset;
	sector_t chunk_number;
	int dummy1, dd_idx = i;
	sector_t r_sector;
	struct stripe_head sh2;

	chunk_offset = sector_div(new_sector, sectors_per_chunk);
	stripe = new_sector;

	if (i == sh->pd_idx)
		return 0;
	switch(conf->level) {
	case 5:
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (i < sh->pd_idx)
				i += raid_disks;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0:
			i -= 1;
			break;
		case ALGORITHM_PARITY_N:
			break;
		}
		break;
	case 6:
		if (i == sh->qd_idx)
			return 0; /* It is the Q disk */
		switch (algorithm) {
		case ALGORITHM_LEFT_ASYMMETRIC:
		case ALGORITHM_RIGHT_ASYMMETRIC:
		case ALGORITHM_ROTATING_ZERO_RESTART:
		case ALGORITHM_ROTATING_N_RESTART:
			if (sh->pd_idx == raid_disks-1)
				i--;	/* Q D D D P */
			else if (i > sh->pd_idx)
				i -= 2; /* D D P Q D */
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
		case ALGORITHM_RIGHT_SYMMETRIC:
			if (sh->pd_idx == raid_disks-1)
				i--; /* Q D D D P */
			else {
				/* D D P Q D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 2);
			}
			break;
		case ALGORITHM_PARITY_0:
			i -= 2;
			break;
		case ALGORITHM_PARITY_N:
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Like left_symmetric, but P is before Q */
			if (sh->pd_idx == 0)
				i--;	/* P D D D Q */
			else {
				/* D D Q P D */
				if (i < sh->pd_idx)
					i += raid_disks;
				i -= (sh->pd_idx + 1);
			}
			break;
		case ALGORITHM_LEFT_ASYMMETRIC_6:
		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			if (i > sh->pd_idx)
				i--;
			break;
		case ALGORITHM_LEFT_SYMMETRIC_6:
		case ALGORITHM_RIGHT_SYMMETRIC_6:
			if (i < sh->pd_idx)
				i += data_disks + 1;
			i -= (sh->pd_idx + 1);
			break;
		case ALGORITHM_PARITY_0_6:
			i -= 1;
			break;
		}
		break;
	}

	chunk_number = stripe * data_disks + i;
	r_sector = chunk_number * sectors_per_chunk + chunk_offset;

	check = raid5_compute_sector(conf, r_sector,
				     previous, &dummy1, &sh2);
	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
	    || sh2.qd_idx != sh->qd_idx) {
		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
		       mdname(conf->mddev));
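/*
 * Continuing the worked example above (not in the original source): for
 * the stripe at sector 72 with disk index i = 3 and pd_idx = 2,
 * LEFT_SYMMETRIC undoes the rotation (i becomes 3 - (2 + 1) = 0), so
 * chunk_number = 1 * 3 + 0 = 3 and r_sector = 3 * 64 + 8 = 200.  The
 * check above then re-runs raid5_compute_sector(200) and insists it maps
 * back to sector 72, dd_idx 3 and the same parity indices.
 */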
2850 schedule_reconstruction(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2851 int rcw
, int expand
)
2853 int i
, pd_idx
= sh
->pd_idx
, qd_idx
= sh
->qd_idx
, disks
= sh
->disks
;
2854 struct r5conf
*conf
= sh
->raid_conf
;
2855 int level
= conf
->level
;
2859 for (i
= disks
; i
--; ) {
2860 struct r5dev
*dev
= &sh
->dev
[i
];
2863 set_bit(R5_LOCKED
, &dev
->flags
);
2864 set_bit(R5_Wantdrain
, &dev
->flags
);
2866 clear_bit(R5_UPTODATE
, &dev
->flags
);
2870 /* if we are not expanding this is a proper write request, and
2871 * there will be bios with new data to be drained into the
2876 /* False alarm, nothing to do */
2878 sh
->reconstruct_state
= reconstruct_state_drain_run
;
2879 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2881 sh
->reconstruct_state
= reconstruct_state_run
;
2883 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2885 if (s
->locked
+ conf
->max_degraded
== disks
)
2886 if (!test_and_set_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2887 atomic_inc(&conf
->pending_full_writes
);
2889 BUG_ON(!(test_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
) ||
2890 test_bit(R5_Wantcompute
, &sh
->dev
[pd_idx
].flags
)));
2891 BUG_ON(level
== 6 &&
2892 (!(test_bit(R5_UPTODATE
, &sh
->dev
[qd_idx
].flags
) ||
2893 test_bit(R5_Wantcompute
, &sh
->dev
[qd_idx
].flags
))));
2895 for (i
= disks
; i
--; ) {
2896 struct r5dev
*dev
= &sh
->dev
[i
];
2897 if (i
== pd_idx
|| i
== qd_idx
)
2901 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
2902 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2903 set_bit(R5_Wantdrain
, &dev
->flags
);
2904 set_bit(R5_LOCKED
, &dev
->flags
);
2905 clear_bit(R5_UPTODATE
, &dev
->flags
);
2910 /* False alarm - nothing to do */
2912 sh
->reconstruct_state
= reconstruct_state_prexor_drain_run
;
2913 set_bit(STRIPE_OP_PREXOR
, &s
->ops_request
);
2914 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2915 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2918 /* keep the parity disk(s) locked while asynchronous operations
2921 set_bit(R5_LOCKED
, &sh
->dev
[pd_idx
].flags
);
2922 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2926 int qd_idx
= sh
->qd_idx
;
2927 struct r5dev
*dev
= &sh
->dev
[qd_idx
];
2929 set_bit(R5_LOCKED
, &dev
->flags
);
2930 clear_bit(R5_UPTODATE
, &dev
->flags
);
2934 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2935 __func__
, (unsigned long long)sh
->sector
,
2936 s
->locked
, s
->ops_request
);
2940 * Each stripe/dev can have one or more bion attached.
2941 * toread/towrite point to the first in a chain.
2942 * The bi_next chain must be in order.
2944 static int add_stripe_bio(struct stripe_head
*sh
, struct bio
*bi
, int dd_idx
,
2945 int forwrite
, int previous
)
2948 struct r5conf
*conf
= sh
->raid_conf
;
2951 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2952 (unsigned long long)bi
->bi_iter
.bi_sector
,
2953 (unsigned long long)sh
->sector
);
2956 * If several bio share a stripe. The bio bi_phys_segments acts as a
2957 * reference count to avoid race. The reference count should already be
2958 * increased before this function is called (for example, in
2959 * make_request()), so other bio sharing this stripe will not free the
2960 * stripe. If a stripe is owned by one stripe, the stripe lock will
2963 spin_lock_irq(&sh
->stripe_lock
);
2964 /* Don't allow new IO added to stripes in batch list */
2968 bip
= &sh
->dev
[dd_idx
].towrite
;
2972 bip
= &sh
->dev
[dd_idx
].toread
;
2973 while (*bip
&& (*bip
)->bi_iter
.bi_sector
< bi
->bi_iter
.bi_sector
) {
2974 if (bio_end_sector(*bip
) > bi
->bi_iter
.bi_sector
)
2976 bip
= & (*bip
)->bi_next
;
2978 if (*bip
&& (*bip
)->bi_iter
.bi_sector
< bio_end_sector(bi
))
2981 if (!forwrite
|| previous
)
2982 clear_bit(STRIPE_BATCH_READY
, &sh
->state
);
2984 BUG_ON(*bip
&& bi
->bi_next
&& (*bip
) != bi
->bi_next
);
2988 raid5_inc_bi_active_stripes(bi
);
2991 /* check if page is covered */
2992 sector_t sector
= sh
->dev
[dd_idx
].sector
;
2993 for (bi
=sh
->dev
[dd_idx
].towrite
;
2994 sector
< sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
&&
2995 bi
&& bi
->bi_iter
.bi_sector
<= sector
;
2996 bi
= r5_next_bio(bi
, sh
->dev
[dd_idx
].sector
)) {
2997 if (bio_end_sector(bi
) >= sector
)
2998 sector
= bio_end_sector(bi
);
3000 if (sector
>= sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
)
3001 if (!test_and_set_bit(R5_OVERWRITE
, &sh
->dev
[dd_idx
].flags
))
3002 sh
->overwrite_disks
++;
3005 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
3006 (unsigned long long)(*bip
)->bi_iter
.bi_sector
,
3007 (unsigned long long)sh
->sector
, dd_idx
);
3009 if (conf
->mddev
->bitmap
&& firstwrite
) {
3010 /* Cannot hold spinlock over bitmap_startwrite,
3011 * but must ensure this isn't added to a batch until
3012 * we have added to the bitmap and set bm_seq.
3013 * So set STRIPE_BITMAP_PENDING to prevent
3015 * If multiple add_stripe_bio() calls race here they
3016 * much all set STRIPE_BITMAP_PENDING. So only the first one
3017 * to complete "bitmap_startwrite" gets to set
3018 * STRIPE_BIT_DELAY. This is important as once a stripe
3019 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
3022 set_bit(STRIPE_BITMAP_PENDING
, &sh
->state
);
3023 spin_unlock_irq(&sh
->stripe_lock
);
3024 bitmap_startwrite(conf
->mddev
->bitmap
, sh
->sector
,
3026 spin_lock_irq(&sh
->stripe_lock
);
3027 clear_bit(STRIPE_BITMAP_PENDING
, &sh
->state
);
3028 if (!sh
->batch_head
) {
3029 sh
->bm_seq
= conf
->seq_flush
+1;
3030 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
3033 spin_unlock_irq(&sh
->stripe_lock
);
3035 if (stripe_can_batch(sh
))
3036 stripe_add_to_batch_list(conf
, sh
);
3040 set_bit(R5_Overlap
, &sh
->dev
[dd_idx
].flags
);
3041 spin_unlock_irq(&sh
->stripe_lock
);
3045 static void end_reshape(struct r5conf
*conf
);
3047 static void stripe_set_idx(sector_t stripe
, struct r5conf
*conf
, int previous
,
3048 struct stripe_head
*sh
)
3050 int sectors_per_chunk
=
3051 previous
? conf
->prev_chunk_sectors
: conf
->chunk_sectors
;
3053 int chunk_offset
= sector_div(stripe
, sectors_per_chunk
);
3054 int disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
3056 raid5_compute_sector(conf
,
3057 stripe
* (disks
- conf
->max_degraded
)
3058 *sectors_per_chunk
+ chunk_offset
,
3064 handle_failed_stripe(struct r5conf
*conf
, struct stripe_head
*sh
,
3065 struct stripe_head_state
*s
, int disks
,
3066 struct bio_list
*return_bi
)
3069 BUG_ON(sh
->batch_head
);
3070 for (i
= disks
; i
--; ) {
3074 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
3075 struct md_rdev
*rdev
;
3077 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3078 if (rdev
&& test_bit(In_sync
, &rdev
->flags
))
3079 atomic_inc(&rdev
->nr_pending
);
3084 if (!rdev_set_badblocks(
3088 md_error(conf
->mddev
, rdev
);
3089 rdev_dec_pending(rdev
, conf
->mddev
);
3092 spin_lock_irq(&sh
->stripe_lock
);
3093 /* fail all writes first */
3094 bi
= sh
->dev
[i
].towrite
;
3095 sh
->dev
[i
].towrite
= NULL
;
3096 sh
->overwrite_disks
= 0;
3097 spin_unlock_irq(&sh
->stripe_lock
);
3101 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
3102 wake_up(&conf
->wait_for_overlap
);
3104 while (bi
&& bi
->bi_iter
.bi_sector
<
3105 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
3106 struct bio
*nextbi
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
3108 bi
->bi_error
= -EIO
;
3109 if (!raid5_dec_bi_active_stripes(bi
)) {
3110 md_write_end(conf
->mddev
);
3111 bio_list_add(return_bi
, bi
);
3116 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
3117 STRIPE_SECTORS
, 0, 0);
3119 /* and fail all 'written' */
3120 bi
= sh
->dev
[i
].written
;
3121 sh
->dev
[i
].written
= NULL
;
3122 if (test_and_clear_bit(R5_SkipCopy
, &sh
->dev
[i
].flags
)) {
3123 WARN_ON(test_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
));
3124 sh
->dev
[i
].page
= sh
->dev
[i
].orig_page
;
3127 if (bi
) bitmap_end
= 1;
3128 while (bi
&& bi
->bi_iter
.bi_sector
<
3129 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
3130 struct bio
*bi2
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
3132 bi
->bi_error
= -EIO
;
3133 if (!raid5_dec_bi_active_stripes(bi
)) {
3134 md_write_end(conf
->mddev
);
3135 bio_list_add(return_bi
, bi
);
3140 /* fail any reads if this device is non-operational and
3141 * the data has not reached the cache yet.
3143 if (!test_bit(R5_Wantfill
, &sh
->dev
[i
].flags
) &&
3144 (!test_bit(R5_Insync
, &sh
->dev
[i
].flags
) ||
3145 test_bit(R5_ReadError
, &sh
->dev
[i
].flags
))) {
3146 spin_lock_irq(&sh
->stripe_lock
);
3147 bi
= sh
->dev
[i
].toread
;
3148 sh
->dev
[i
].toread
= NULL
;
3149 spin_unlock_irq(&sh
->stripe_lock
);
3150 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
3151 wake_up(&conf
->wait_for_overlap
);
3154 while (bi
&& bi
->bi_iter
.bi_sector
<
3155 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
3156 struct bio
*nextbi
=
3157 r5_next_bio(bi
, sh
->dev
[i
].sector
);
3159 bi
->bi_error
= -EIO
;
3160 if (!raid5_dec_bi_active_stripes(bi
))
3161 bio_list_add(return_bi
, bi
);
3166 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
3167 STRIPE_SECTORS
, 0, 0);
3168 /* If we were in the middle of a write the parity block might
3169 * still be locked - so just clear all R5_LOCKED flags
3171 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3176 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
3177 if (atomic_dec_and_test(&conf
->pending_full_writes
))
3178 md_wakeup_thread(conf
->mddev
->thread
);
3182 handle_failed_sync(struct r5conf
*conf
, struct stripe_head
*sh
,
3183 struct stripe_head_state
*s
)
3188 BUG_ON(sh
->batch_head
);
3189 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3190 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
))
3191 wake_up(&conf
->wait_for_overlap
);
3194 /* There is nothing more to do for sync/check/repair.
3195 * Don't even need to abort as that is handled elsewhere
3196 * if needed, and not always wanted e.g. if there is a known
3198 * For recover/replace we need to record a bad block on all
3199 * non-sync devices, or abort the recovery
3201 if (test_bit(MD_RECOVERY_RECOVER
, &conf
->mddev
->recovery
)) {
3202 /* During recovery devices cannot be removed, so
3203 * locking and refcounting of rdevs is not needed
3205 for (i
= 0; i
< conf
->raid_disks
; i
++) {
3206 struct md_rdev
*rdev
= conf
->disks
[i
].rdev
;
3208 && !test_bit(Faulty
, &rdev
->flags
)
3209 && !test_bit(In_sync
, &rdev
->flags
)
3210 && !rdev_set_badblocks(rdev
, sh
->sector
,
3213 rdev
= conf
->disks
[i
].replacement
;
3215 && !test_bit(Faulty
, &rdev
->flags
)
3216 && !test_bit(In_sync
, &rdev
->flags
)
3217 && !rdev_set_badblocks(rdev
, sh
->sector
,
3222 conf
->recovery_disabled
=
3223 conf
->mddev
->recovery_disabled
;
3225 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, !abort
);
3228 static int want_replace(struct stripe_head
*sh
, int disk_idx
)
3230 struct md_rdev
*rdev
;
3232 /* Doing recovery so rcu locking not required */
3233 rdev
= sh
->raid_conf
->disks
[disk_idx
].replacement
;
3235 && !test_bit(Faulty
, &rdev
->flags
)
3236 && !test_bit(In_sync
, &rdev
->flags
)
3237 && (rdev
->recovery_offset
<= sh
->sector
3238 || rdev
->mddev
->recovery_cp
<= sh
->sector
))
3244 /* fetch_block - checks the given member device to see if its data needs
3245 * to be read or computed to satisfy a request.
3247 * Returns 1 when no more member devices need to be checked, otherwise returns
3248 * 0 to tell the loop in handle_stripe_fill to continue
3251 static int need_this_block(struct stripe_head
*sh
, struct stripe_head_state
*s
,
3252 int disk_idx
, int disks
)
3254 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
3255 struct r5dev
*fdev
[2] = { &sh
->dev
[s
->failed_num
[0]],
3256 &sh
->dev
[s
->failed_num
[1]] };
3260 if (test_bit(R5_LOCKED
, &dev
->flags
) ||
3261 test_bit(R5_UPTODATE
, &dev
->flags
))
3262 /* No point reading this as we already have it or have
3263 * decided to get it.
3268 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)))
3269 /* We need this block to directly satisfy a request */
3272 if (s
->syncing
|| s
->expanding
||
3273 (s
->replacing
&& want_replace(sh
, disk_idx
)))
3274 /* When syncing, or expanding we read everything.
3275 * When replacing, we need the replaced block.
3279 if ((s
->failed
>= 1 && fdev
[0]->toread
) ||
3280 (s
->failed
>= 2 && fdev
[1]->toread
))
3281 /* If we want to read from a failed device, then
3282 * we need to actually read every other device.
3286 /* Sometimes neither read-modify-write nor reconstruct-write
3287 * cycles can work. In those cases we read every block we
3288 * can. Then the parity-update is certain to have enough to
3290 * This can only be a problem when we need to write something,
3291 * and some device has failed. If either of those tests
3292 * fail we need look no further.
3294 if (!s
->failed
|| !s
->to_write
)
3297 if (test_bit(R5_Insync
, &dev
->flags
) &&
3298 !test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3299 /* Pre-reads at not permitted until after short delay
3300 * to gather multiple requests. However if this
3301 * device is no Insync, the block could only be be computed
3302 * and there is no need to delay that.
3306 for (i
= 0; i
< s
->failed
&& i
< 2; i
++) {
3307 if (fdev
[i
]->towrite
&&
3308 !test_bit(R5_UPTODATE
, &fdev
[i
]->flags
) &&
3309 !test_bit(R5_OVERWRITE
, &fdev
[i
]->flags
))
3310 /* If we have a partial write to a failed
3311 * device, then we will need to reconstruct
3312 * the content of that device, so all other
3313 * devices must be read.
3318 /* If we are forced to do a reconstruct-write, either because
3319 * the current RAID6 implementation only supports that, or
3320 * or because parity cannot be trusted and we are currently
3321 * recovering it, there is extra need to be careful.
3322 * If one of the devices that we would need to read, because
3323 * it is not being overwritten (and maybe not written at all)
3324 * is missing/faulty, then we need to read everything we can.
3326 if (sh
->raid_conf
->level
!= 6 &&
3327 sh
->sector
< sh
->raid_conf
->mddev
->recovery_cp
)
3328 /* reconstruct-write isn't being forced */
3330 for (i
= 0; i
< s
->failed
&& i
< 2; i
++) {
3331 if (s
->failed_num
[i
] != sh
->pd_idx
&&
3332 s
->failed_num
[i
] != sh
->qd_idx
&&
3333 !test_bit(R5_UPTODATE
, &fdev
[i
]->flags
) &&
3334 !test_bit(R5_OVERWRITE
, &fdev
[i
]->flags
))
3341 static int fetch_block(struct stripe_head
*sh
, struct stripe_head_state
*s
,
3342 int disk_idx
, int disks
)
3344 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
3346 /* is the data in this block needed, and can we get it? */
3347 if (need_this_block(sh
, s
, disk_idx
, disks
)) {
3348 /* we would like to get this block, possibly by computing it,
3349 * otherwise read it if the backing disk is insync
3351 BUG_ON(test_bit(R5_Wantcompute
, &dev
->flags
));
3352 BUG_ON(test_bit(R5_Wantread
, &dev
->flags
));
3353 BUG_ON(sh
->batch_head
);
3354 if ((s
->uptodate
== disks
- 1) &&
3355 (s
->failed
&& (disk_idx
== s
->failed_num
[0] ||
3356 disk_idx
== s
->failed_num
[1]))) {
3357 /* have disk failed, and we're requested to fetch it;
3360 pr_debug("Computing stripe %llu block %d\n",
3361 (unsigned long long)sh
->sector
, disk_idx
);
3362 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
3363 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
3364 set_bit(R5_Wantcompute
, &dev
->flags
);
3365 sh
->ops
.target
= disk_idx
;
3366 sh
->ops
.target2
= -1; /* no 2nd target */
3368 /* Careful: from this point on 'uptodate' is in the eye
3369 * of raid_run_ops which services 'compute' operations
3370 * before writes. R5_Wantcompute flags a block that will
3371 * be R5_UPTODATE by the time it is needed for a
3372 * subsequent operation.
3376 } else if (s
->uptodate
== disks
-2 && s
->failed
>= 2) {
3377 /* Computing 2-failure is *very* expensive; only
3378 * do it if failed >= 2
3381 for (other
= disks
; other
--; ) {
3382 if (other
== disk_idx
)
3384 if (!test_bit(R5_UPTODATE
,
3385 &sh
->dev
[other
].flags
))
3389 pr_debug("Computing stripe %llu blocks %d,%d\n",
3390 (unsigned long long)sh
->sector
,
3392 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
3393 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
3394 set_bit(R5_Wantcompute
, &sh
->dev
[disk_idx
].flags
);
3395 set_bit(R5_Wantcompute
, &sh
->dev
[other
].flags
);
3396 sh
->ops
.target
= disk_idx
;
3397 sh
->ops
.target2
= other
;
3401 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
3402 set_bit(R5_LOCKED
, &dev
->flags
);
3403 set_bit(R5_Wantread
, &dev
->flags
);
3405 pr_debug("Reading block %d (sync=%d)\n",
3406 disk_idx
, s
->syncing
);
3414 * handle_stripe_fill - read or compute data to satisfy pending requests.
3416 static void handle_stripe_fill(struct stripe_head
*sh
,
3417 struct stripe_head_state
*s
,
3422 /* look for blocks to read/compute, skip this if a compute
3423 * is already in flight, or if the stripe contents are in the
3424 * midst of changing due to a write
3426 if (!test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) && !sh
->check_state
&&
3427 !sh
->reconstruct_state
)
3428 for (i
= disks
; i
--; )
3429 if (fetch_block(sh
, s
, i
, disks
))
3431 set_bit(STRIPE_HANDLE
, &sh
->state
);
3434 static void break_stripe_batch_list(struct stripe_head
*head_sh
,
3435 unsigned long handle_flags
);
3436 /* handle_stripe_clean_event
3437 * any written block on an uptodate or failed drive can be returned.
3438 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
3439 * never LOCKED, so we don't need to test 'failed' directly.
3441 static void handle_stripe_clean_event(struct r5conf
*conf
,
3442 struct stripe_head
*sh
, int disks
, struct bio_list
*return_bi
)
3446 int discard_pending
= 0;
3447 struct stripe_head
*head_sh
= sh
;
3448 bool do_endio
= false;
3450 for (i
= disks
; i
--; )
3451 if (sh
->dev
[i
].written
) {
3453 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
3454 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
3455 test_bit(R5_Discard
, &dev
->flags
) ||
3456 test_bit(R5_SkipCopy
, &dev
->flags
))) {
3457 /* We can return any write requests */
3458 struct bio
*wbi
, *wbi2
;
3459 pr_debug("Return write for disc %d\n", i
);
3460 if (test_and_clear_bit(R5_Discard
, &dev
->flags
))
3461 clear_bit(R5_UPTODATE
, &dev
->flags
);
3462 if (test_and_clear_bit(R5_SkipCopy
, &dev
->flags
)) {
3463 WARN_ON(test_bit(R5_UPTODATE
, &dev
->flags
));
3468 dev
->page
= dev
->orig_page
;
3470 dev
->written
= NULL
;
3471 while (wbi
&& wbi
->bi_iter
.bi_sector
<
3472 dev
->sector
+ STRIPE_SECTORS
) {
3473 wbi2
= r5_next_bio(wbi
, dev
->sector
);
3474 if (!raid5_dec_bi_active_stripes(wbi
)) {
3475 md_write_end(conf
->mddev
);
3476 bio_list_add(return_bi
, wbi
);
3480 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
3482 !test_bit(STRIPE_DEGRADED
, &sh
->state
),
3484 if (head_sh
->batch_head
) {
3485 sh
= list_first_entry(&sh
->batch_list
,
3488 if (sh
!= head_sh
) {
3495 } else if (test_bit(R5_Discard
, &dev
->flags
))
3496 discard_pending
= 1;
3497 WARN_ON(test_bit(R5_SkipCopy
, &dev
->flags
));
3498 WARN_ON(dev
->page
!= dev
->orig_page
);
3500 if (!discard_pending
&&
3501 test_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
)) {
3503 clear_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
);
3504 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
3505 if (sh
->qd_idx
>= 0) {
3506 clear_bit(R5_Discard
, &sh
->dev
[sh
->qd_idx
].flags
);
3507 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->qd_idx
].flags
);
3509 /* now that discard is done we can proceed with any sync */
3510 clear_bit(STRIPE_DISCARD
, &sh
->state
);
3512 * SCSI discard will change some bio fields and the stripe has
3513 * no updated data, so remove it from hash list and the stripe
3514 * will be reinitialized
3517 hash
= sh
->hash_lock_index
;
3518 spin_lock_irq(conf
->hash_locks
+ hash
);
3520 spin_unlock_irq(conf
->hash_locks
+ hash
);
3521 if (head_sh
->batch_head
) {
3522 sh
= list_first_entry(&sh
->batch_list
,
3523 struct stripe_head
, batch_list
);
3529 if (test_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
))
3530 set_bit(STRIPE_HANDLE
, &sh
->state
);
3534 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
3535 if (atomic_dec_and_test(&conf
->pending_full_writes
))
3536 md_wakeup_thread(conf
->mddev
->thread
);
3538 if (head_sh
->batch_head
&& do_endio
)
3539 break_stripe_batch_list(head_sh
, STRIPE_EXPAND_SYNC_FLAGS
);
3542 static void handle_stripe_dirtying(struct r5conf
*conf
,
3543 struct stripe_head
*sh
,
3544 struct stripe_head_state
*s
,
3547 int rmw
= 0, rcw
= 0, i
;
3548 sector_t recovery_cp
= conf
->mddev
->recovery_cp
;
3550 /* Check whether resync is now happening or should start.
3551 * If yes, then the array is dirty (after unclean shutdown or
3552 * initial creation), so parity in some stripes might be inconsistent.
3553 * In this case, we need to always do reconstruct-write, to ensure
3554 * that in case of drive failure or read-error correction, we
3555 * generate correct data from the parity.
3557 if (conf
->rmw_level
== PARITY_DISABLE_RMW
||
3558 (recovery_cp
< MaxSector
&& sh
->sector
>= recovery_cp
&&
3560 /* Calculate the real rcw later - for now make it
3561 * look like rcw is cheaper
3564 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
3565 conf
->rmw_level
, (unsigned long long)recovery_cp
,
3566 (unsigned long long)sh
->sector
);
3567 } else for (i
= disks
; i
--; ) {
3568 /* would I have to read this buffer for read_modify_write */
3569 struct r5dev
*dev
= &sh
->dev
[i
];
3570 if ((dev
->towrite
|| i
== sh
->pd_idx
|| i
== sh
->qd_idx
) &&
3571 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3572 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3573 test_bit(R5_Wantcompute
, &dev
->flags
))) {
3574 if (test_bit(R5_Insync
, &dev
->flags
))
3577 rmw
+= 2*disks
; /* cannot read it */
3579 /* Would I have to read this buffer for reconstruct_write */
3580 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
3581 i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
&&
3582 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3583 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3584 test_bit(R5_Wantcompute
, &dev
->flags
))) {
3585 if (test_bit(R5_Insync
, &dev
->flags
))
3591 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
3592 (unsigned long long)sh
->sector
, rmw
, rcw
);
3593 set_bit(STRIPE_HANDLE
, &sh
->state
);
3594 if ((rmw
< rcw
|| (rmw
== rcw
&& conf
->rmw_level
== PARITY_ENABLE_RMW
)) && rmw
> 0) {
3595 /* prefer read-modify-write, but need to get some data */
3596 if (conf
->mddev
->queue
)
3597 blk_add_trace_msg(conf
->mddev
->queue
,
3598 "raid5 rmw %llu %d",
3599 (unsigned long long)sh
->sector
, rmw
);
3600 for (i
= disks
; i
--; ) {
3601 struct r5dev
*dev
= &sh
->dev
[i
];
3602 if ((dev
->towrite
|| i
== sh
->pd_idx
|| i
== sh
->qd_idx
) &&
3603 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3604 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3605 test_bit(R5_Wantcompute
, &dev
->flags
)) &&
3606 test_bit(R5_Insync
, &dev
->flags
)) {
3607 if (test_bit(STRIPE_PREREAD_ACTIVE
,
3609 pr_debug("Read_old block %d for r-m-w\n",
3611 set_bit(R5_LOCKED
, &dev
->flags
);
3612 set_bit(R5_Wantread
, &dev
->flags
);
3615 set_bit(STRIPE_DELAYED
, &sh
->state
);
3616 set_bit(STRIPE_HANDLE
, &sh
->state
);
3621 if ((rcw
< rmw
|| (rcw
== rmw
&& conf
->rmw_level
!= PARITY_ENABLE_RMW
)) && rcw
> 0) {
3622 /* want reconstruct write, but need to get some data */
3625 for (i
= disks
; i
--; ) {
3626 struct r5dev
*dev
= &sh
->dev
[i
];
3627 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
3628 i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
&&
3629 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3630 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
3631 test_bit(R5_Wantcompute
, &dev
->flags
))) {
3633 if (test_bit(R5_Insync
, &dev
->flags
) &&
3634 test_bit(STRIPE_PREREAD_ACTIVE
,
3636 pr_debug("Read_old block "
3637 "%d for Reconstruct\n", i
);
3638 set_bit(R5_LOCKED
, &dev
->flags
);
3639 set_bit(R5_Wantread
, &dev
->flags
);
3643 set_bit(STRIPE_DELAYED
, &sh
->state
);
3644 set_bit(STRIPE_HANDLE
, &sh
->state
);
3648 if (rcw
&& conf
->mddev
->queue
)
3649 blk_add_trace_msg(conf
->mddev
->queue
, "raid5 rcw %llu %d %d %d",
3650 (unsigned long long)sh
->sector
,
3651 rcw
, qread
, test_bit(STRIPE_DELAYED
, &sh
->state
));
3654 if (rcw
> disks
&& rmw
> disks
&&
3655 !test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3656 set_bit(STRIPE_DELAYED
, &sh
->state
);
3658 /* now if nothing is locked, and if we have enough data,
3659 * we can start a write request
3661 /* since handle_stripe can be called at any time we need to handle the
3662 * case where a compute block operation has been submitted and then a
3663 * subsequent call wants to start a write request. raid_run_ops only
3664 * handles the case where compute block and reconstruct are requested
3665 * simultaneously. If this is not the case then new writes need to be
3666 * held off until the compute completes.
3668 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
3669 (s
->locked
== 0 && (rcw
== 0 || rmw
== 0) &&
3670 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)))
3671 schedule_reconstruction(sh
, s
, rcw
== 0, 0);
3674 static void handle_parity_checks5(struct r5conf
*conf
, struct stripe_head
*sh
,
3675 struct stripe_head_state
*s
, int disks
)
3677 struct r5dev
*dev
= NULL
;
3679 BUG_ON(sh
->batch_head
);
3680 set_bit(STRIPE_HANDLE
, &sh
->state
);
3682 switch (sh
->check_state
) {
3683 case check_state_idle
:
3684 /* start a new check operation if there are no failures */
3685 if (s
->failed
== 0) {
3686 BUG_ON(s
->uptodate
!= disks
);
3687 sh
->check_state
= check_state_run
;
3688 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
3689 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
3693 dev
= &sh
->dev
[s
->failed_num
[0]];
3695 case check_state_compute_result
:
3696 sh
->check_state
= check_state_idle
;
3698 dev
= &sh
->dev
[sh
->pd_idx
];
3700 /* check that a write has not made the stripe insync */
3701 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
3704 /* either failed parity check, or recovery is happening */
3705 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
3706 BUG_ON(s
->uptodate
!= disks
);
3708 set_bit(R5_LOCKED
, &dev
->flags
);
3710 set_bit(R5_Wantwrite
, &dev
->flags
);
3712 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
3713 set_bit(STRIPE_INSYNC
, &sh
->state
);
3715 case check_state_run
:
3716 break; /* we will be called again upon completion */
3717 case check_state_check_result
:
3718 sh
->check_state
= check_state_idle
;
3720 /* if a failure occurred during the check operation, leave
3721 * STRIPE_INSYNC not set and let the stripe be handled again
3726 /* handle a successful check operation, if parity is correct
3727 * we are done. Otherwise update the mismatch count and repair
3728 * parity if !MD_RECOVERY_CHECK
3730 if ((sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) == 0)
3731 /* parity is correct (on disc,
3732 * not in buffer any more)
3734 set_bit(STRIPE_INSYNC
, &sh
->state
);
3736 atomic64_add(STRIPE_SECTORS
, &conf
->mddev
->resync_mismatches
);
3737 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
3738 /* don't try to repair!! */
3739 set_bit(STRIPE_INSYNC
, &sh
->state
);
3741 sh
->check_state
= check_state_compute_run
;
3742 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
3743 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
3744 set_bit(R5_Wantcompute
,
3745 &sh
->dev
[sh
->pd_idx
].flags
);
3746 sh
->ops
.target
= sh
->pd_idx
;
3747 sh
->ops
.target2
= -1;
3752 case check_state_compute_run
:
3755 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
3756 __func__
, sh
->check_state
,
3757 (unsigned long long) sh
->sector
);
3762 static void handle_parity_checks6(struct r5conf
*conf
, struct stripe_head
*sh
,
3763 struct stripe_head_state
*s
,
3766 int pd_idx
= sh
->pd_idx
;
3767 int qd_idx
= sh
->qd_idx
;
3770 BUG_ON(sh
->batch_head
);
3771 set_bit(STRIPE_HANDLE
, &sh
->state
);
3773 BUG_ON(s
->failed
> 2);
3775 /* Want to check and possibly repair P and Q.
3776 * However there could be one 'failed' device, in which
3777 * case we can only check one of them, possibly using the
3778 * other to generate missing data
3781 switch (sh
->check_state
) {
3782 case check_state_idle
:
3783 /* start a new check operation if there are < 2 failures */
3784 if (s
->failed
== s
->q_failed
) {
3785 /* The only possible failed device holds Q, so it
3786 * makes sense to check P (If anything else were failed,
3787 * we would have used P to recreate it).
3789 sh
->check_state
= check_state_run
;
3791 if (!s
->q_failed
&& s
->failed
< 2) {
3792 /* Q is not failed, and we didn't use it to generate
3793 * anything, so it makes sense to check it
3795 if (sh
->check_state
== check_state_run
)
3796 sh
->check_state
= check_state_run_pq
;
3798 sh
->check_state
= check_state_run_q
;
3801 /* discard potentially stale zero_sum_result */
3802 sh
->ops
.zero_sum_result
= 0;
3804 if (sh
->check_state
== check_state_run
) {
3805 /* async_xor_zero_sum destroys the contents of P */
3806 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
3809 if (sh
->check_state
>= check_state_run
&&
3810 sh
->check_state
<= check_state_run_pq
) {
3811 /* async_syndrome_zero_sum preserves P and Q, so
3812 * no need to mark them !uptodate here
3814 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
3818 /* we have 2-disk failure */
3819 BUG_ON(s
->failed
!= 2);
3821 case check_state_compute_result
:
3822 sh
->check_state
= check_state_idle
;
3824 /* check that a write has not made the stripe insync */
3825 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
3828 /* now write out any block on a failed drive,
3829 * or P or Q if they were recomputed
3831 BUG_ON(s
->uptodate
< disks
- 1); /* We don't need Q to recover */
3832 if (s
->failed
== 2) {
3833 dev
= &sh
->dev
[s
->failed_num
[1]];
3835 set_bit(R5_LOCKED
, &dev
->flags
);
3836 set_bit(R5_Wantwrite
, &dev
->flags
);
3838 if (s
->failed
>= 1) {
3839 dev
= &sh
->dev
[s
->failed_num
[0]];
3841 set_bit(R5_LOCKED
, &dev
->flags
);
3842 set_bit(R5_Wantwrite
, &dev
->flags
);
3844 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
3845 dev
= &sh
->dev
[pd_idx
];
3847 set_bit(R5_LOCKED
, &dev
->flags
);
3848 set_bit(R5_Wantwrite
, &dev
->flags
);
3850 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
3851 dev
= &sh
->dev
[qd_idx
];
3853 set_bit(R5_LOCKED
, &dev
->flags
);
3854 set_bit(R5_Wantwrite
, &dev
->flags
);
3856 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
3858 set_bit(STRIPE_INSYNC
, &sh
->state
);
3860 case check_state_run
:
3861 case check_state_run_q
:
3862 case check_state_run_pq
:
3863 break; /* we will be called again upon completion */
3864 case check_state_check_result
:
3865 sh
->check_state
= check_state_idle
;
3867 /* handle a successful check operation, if parity is correct
3868 * we are done. Otherwise update the mismatch count and repair
3869 * parity if !MD_RECOVERY_CHECK
3871 if (sh
->ops
.zero_sum_result
== 0) {
3872 /* both parities are correct */
3874 set_bit(STRIPE_INSYNC
, &sh
->state
);
3876 /* in contrast to the raid5 case we can validate
3877 * parity, but still have a failure to write
3880 sh
->check_state
= check_state_compute_result
;
3881 /* Returning at this point means that we may go
3882 * off and bring p and/or q uptodate again so
3883 * we make sure to check zero_sum_result again
3884 * to verify if p or q need writeback
3888 atomic64_add(STRIPE_SECTORS
, &conf
->mddev
->resync_mismatches
);
3889 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
3890 /* don't try to repair!! */
3891 set_bit(STRIPE_INSYNC
, &sh
->state
);
3893 int *target
= &sh
->ops
.target
;
3895 sh
->ops
.target
= -1;
3896 sh
->ops
.target2
= -1;
3897 sh
->check_state
= check_state_compute_run
;
3898 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
3899 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
3900 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
3901 set_bit(R5_Wantcompute
,
3902 &sh
->dev
[pd_idx
].flags
);
3904 target
= &sh
->ops
.target2
;
3907 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
3908 set_bit(R5_Wantcompute
,
3909 &sh
->dev
[qd_idx
].flags
);
3916 case check_state_compute_run
:
3919 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
3920 __func__
, sh
->check_state
,
3921 (unsigned long long) sh
->sector
);
3926 static void handle_stripe_expansion(struct r5conf
*conf
, struct stripe_head
*sh
)
3930 /* We have read all the blocks in this stripe and now we need to
3931 * copy some of them into a target stripe for expand.
3933 struct dma_async_tx_descriptor
*tx
= NULL
;
3934 BUG_ON(sh
->batch_head
);
3935 clear_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3936 for (i
= 0; i
< sh
->disks
; i
++)
3937 if (i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
) {
3939 struct stripe_head
*sh2
;
3940 struct async_submit_ctl submit
;
3942 sector_t bn
= compute_blocknr(sh
, i
, 1);
3943 sector_t s
= raid5_compute_sector(conf
, bn
, 0,
3945 sh2
= get_active_stripe(conf
, s
, 0, 1, 1);
3947 /* so far only the early blocks of this stripe
3948 * have been requested. When later blocks
3949 * get requested, we will try again
3952 if (!test_bit(STRIPE_EXPANDING
, &sh2
->state
) ||
3953 test_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
)) {
3954 /* must have already done this block */
3955 release_stripe(sh2
);
3959 /* place all the copies on one channel */
3960 init_async_submit(&submit
, 0, tx
, NULL
, NULL
, NULL
);
3961 tx
= async_memcpy(sh2
->dev
[dd_idx
].page
,
3962 sh
->dev
[i
].page
, 0, 0, STRIPE_SIZE
,
3965 set_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
);
3966 set_bit(R5_UPTODATE
, &sh2
->dev
[dd_idx
].flags
);
3967 for (j
= 0; j
< conf
->raid_disks
; j
++)
3968 if (j
!= sh2
->pd_idx
&&
3970 !test_bit(R5_Expanded
, &sh2
->dev
[j
].flags
))
3972 if (j
== conf
->raid_disks
) {
3973 set_bit(STRIPE_EXPAND_READY
, &sh2
->state
);
3974 set_bit(STRIPE_HANDLE
, &sh2
->state
);
3976 release_stripe(sh2
);
3979 /* done submitting copies, wait for them to complete */
3980 async_tx_quiesce(&tx
);
3984 * handle_stripe - do things to a stripe.
3986 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
3987 * state of various bits to see what needs to be done.
3989 * return some read requests which now have data
3990 * return some write requests which are safely on storage
3991 * schedule a read on some buffers
3992 * schedule a write of some buffers
3993 * return confirmation of parity correctness
3997 static void analyse_stripe(struct stripe_head
*sh
, struct stripe_head_state
*s
)
3999 struct r5conf
*conf
= sh
->raid_conf
;
4000 int disks
= sh
->disks
;
4003 int do_recovery
= 0;
4005 memset(s
, 0, sizeof(*s
));
4007 s
->expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
) && !sh
->batch_head
;
4008 s
->expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
) && !sh
->batch_head
;
4009 s
->failed_num
[0] = -1;
4010 s
->failed_num
[1] = -1;
4012 /* Now to look around and see what can be done */
4014 for (i
=disks
; i
--; ) {
4015 struct md_rdev
*rdev
;
4022 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
4024 dev
->toread
, dev
->towrite
, dev
->written
);
4025 /* maybe we can reply to a read
4027 * new wantfill requests are only permitted while
4028 * ops_complete_biofill is guaranteed to be inactive
4030 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
4031 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
4032 set_bit(R5_Wantfill
, &dev
->flags
);
4034 /* now count some things */
4035 if (test_bit(R5_LOCKED
, &dev
->flags
))
4037 if (test_bit(R5_UPTODATE
, &dev
->flags
))
4039 if (test_bit(R5_Wantcompute
, &dev
->flags
)) {
4041 BUG_ON(s
->compute
> 2);
4044 if (test_bit(R5_Wantfill
, &dev
->flags
))
4046 else if (dev
->toread
)
4050 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
4055 /* Prefer to use the replacement for reads, but only
4056 * if it is recovered enough and has no bad blocks.
4058 rdev
= rcu_dereference(conf
->disks
[i
].replacement
);
4059 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
) &&
4060 rdev
->recovery_offset
>= sh
->sector
+ STRIPE_SECTORS
&&
4061 !is_badblock(rdev
, sh
->sector
, STRIPE_SECTORS
,
4062 &first_bad
, &bad_sectors
))
4063 set_bit(R5_ReadRepl
, &dev
->flags
);
4065 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
))
4066 set_bit(R5_NeedReplace
, &dev
->flags
);
4068 clear_bit(R5_NeedReplace
, &dev
->flags
);
4069 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
4070 clear_bit(R5_ReadRepl
, &dev
->flags
);
4072 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
4075 is_bad
= is_badblock(rdev
, sh
->sector
, STRIPE_SECTORS
,
4076 &first_bad
, &bad_sectors
);
4077 if (s
->blocked_rdev
== NULL
4078 && (test_bit(Blocked
, &rdev
->flags
)
4081 set_bit(BlockedBadBlocks
,
4083 s
->blocked_rdev
= rdev
;
4084 atomic_inc(&rdev
->nr_pending
);
4087 clear_bit(R5_Insync
, &dev
->flags
);
4091 /* also not in-sync */
4092 if (!test_bit(WriteErrorSeen
, &rdev
->flags
) &&
4093 test_bit(R5_UPTODATE
, &dev
->flags
)) {
4094 /* treat as in-sync, but with a read error
4095 * which we can now try to correct
4097 set_bit(R5_Insync
, &dev
->flags
);
4098 set_bit(R5_ReadError
, &dev
->flags
);
4100 } else if (test_bit(In_sync
, &rdev
->flags
))
4101 set_bit(R5_Insync
, &dev
->flags
);
4102 else if (sh
->sector
+ STRIPE_SECTORS
<= rdev
->recovery_offset
)
4103 /* in sync if before recovery_offset */
4104 set_bit(R5_Insync
, &dev
->flags
);
4105 else if (test_bit(R5_UPTODATE
, &dev
->flags
) &&
4106 test_bit(R5_Expanded
, &dev
->flags
))
4107 /* If we've reshaped into here, we assume it is Insync.
4108 * We will shortly update recovery_offset to make
4111 set_bit(R5_Insync
, &dev
->flags
);
4113 if (test_bit(R5_WriteError
, &dev
->flags
)) {
4114 /* This flag does not apply to '.replacement'
4115 * only to .rdev, so make sure to check that*/
4116 struct md_rdev
*rdev2
= rcu_dereference(
4117 conf
->disks
[i
].rdev
);
4119 clear_bit(R5_Insync
, &dev
->flags
);
4120 if (rdev2
&& !test_bit(Faulty
, &rdev2
->flags
)) {
4121 s
->handle_bad_blocks
= 1;
4122 atomic_inc(&rdev2
->nr_pending
);
4124 clear_bit(R5_WriteError
, &dev
->flags
);
4126 if (test_bit(R5_MadeGood
, &dev
->flags
)) {
4127 /* This flag does not apply to '.replacement'
4128 * only to .rdev, so make sure to check that*/
4129 struct md_rdev
*rdev2
= rcu_dereference(
4130 conf
->disks
[i
].rdev
);
4131 if (rdev2
&& !test_bit(Faulty
, &rdev2
->flags
)) {
4132 s
->handle_bad_blocks
= 1;
4133 atomic_inc(&rdev2
->nr_pending
);
4135 clear_bit(R5_MadeGood
, &dev
->flags
);
4137 if (test_bit(R5_MadeGoodRepl
, &dev
->flags
)) {
4138 struct md_rdev
*rdev2
= rcu_dereference(
4139 conf
->disks
[i
].replacement
);
4140 if (rdev2
&& !test_bit(Faulty
, &rdev2
->flags
)) {
4141 s
->handle_bad_blocks
= 1;
4142 atomic_inc(&rdev2
->nr_pending
);
4144 clear_bit(R5_MadeGoodRepl
, &dev
->flags
);
4146 if (!test_bit(R5_Insync
, &dev
->flags
)) {
4147 /* The ReadError flag will just be confusing now */
4148 clear_bit(R5_ReadError
, &dev
->flags
);
4149 clear_bit(R5_ReWrite
, &dev
->flags
);
4151 if (test_bit(R5_ReadError
, &dev
->flags
))
4152 clear_bit(R5_Insync
, &dev
->flags
);
4153 if (!test_bit(R5_Insync
, &dev
->flags
)) {
4155 s
->failed_num
[s
->failed
] = i
;
4157 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
))
4161 if (test_bit(STRIPE_SYNCING
, &sh
->state
)) {
4162 /* If there is a failed device being replaced,
4163 * we must be recovering.
4164 * else if we are after recovery_cp, we must be syncing
4165 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
4166 * else we can only be replacing
4167 * sync and recovery both need to read all devices, and so
4168 * use the same flag.
4171 sh
->sector
>= conf
->mddev
->recovery_cp
||
4172 test_bit(MD_RECOVERY_REQUESTED
, &(conf
->mddev
->recovery
)))
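/*
 * Stripe batching: adjacent full-stripe writes are linked onto a batch_head's
 * batch_list so they can be processed together.  The two helpers below take a
 * stripe out of (or break up) such a batch before it is handled on its own.
 * (Editorial summary of the batching machinery used by the code that follows.)
 */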
static int clear_batch_ready(struct stripe_head *sh)
{
	/* Return '1' if this is a member of batch, or
	 * '0' if it is a lone stripe or a head which can now be
	 * handled.
	 */
	struct stripe_head *tmp;
	if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
		return (sh->batch_head && sh->batch_head != sh);
	spin_lock(&sh->stripe_lock);
	if (!sh->batch_head) {
		spin_unlock(&sh->stripe_lock);
		return 0;
	}

	/*
	 * this stripe could be added to a batch list before we check
	 * BATCH_READY, skips it
	 */
	if (sh->batch_head != sh) {
		spin_unlock(&sh->stripe_lock);
		return 1;
	}
	spin_lock(&sh->batch_lock);
	list_for_each_entry(tmp, &sh->batch_list, batch_list)
		clear_bit(STRIPE_BATCH_READY, &tmp->state);
	spin_unlock(&sh->batch_lock);
	spin_unlock(&sh->stripe_lock);

	/*
	 * BATCH_READY is cleared, no new stripes can be added.
	 * batch_list can be accessed without lock
	 */
	return 0;
}
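/*
 * break_stripe_batch_list() detaches every stripe on head_sh's batch_list,
 * copies the relevant state from the batch head back into each member, and
 * re-queues a member for handling when its state matches handle_flags
 * (handle_flags == 0 means "always re-queue").
 */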
static void break_stripe_batch_list(struct stripe_head *head_sh,
				    unsigned long handle_flags)
{
	struct stripe_head *sh, *next;
	int i;
	int do_wakeup = 0;

	list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {

		list_del_init(&sh->batch_list);

		WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
					  (1 << STRIPE_SYNCING) |
					  (1 << STRIPE_REPLACED) |
					  (1 << STRIPE_PREREAD_ACTIVE) |
					  (1 << STRIPE_DELAYED) |
					  (1 << STRIPE_BIT_DELAY) |
					  (1 << STRIPE_FULL_WRITE) |
					  (1 << STRIPE_BIOFILL_RUN) |
					  (1 << STRIPE_COMPUTE_RUN) |
					  (1 << STRIPE_OPS_REQ_PENDING) |
					  (1 << STRIPE_DISCARD) |
					  (1 << STRIPE_BATCH_READY) |
					  (1 << STRIPE_BATCH_ERR) |
					  (1 << STRIPE_BITMAP_PENDING)));
		WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
					       (1 << STRIPE_REPLACED)));

		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
					    (1 << STRIPE_DEGRADED)),
			      head_sh->state & (1 << STRIPE_INSYNC));

		sh->check_state = head_sh->check_state;
		sh->reconstruct_state = head_sh->reconstruct_state;
		for (i = 0; i < sh->disks; i++) {
			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
				do_wakeup = 1;
			sh->dev[i].flags = head_sh->dev[i].flags &
				(~((1 << R5_WriteError) | (1 << R5_Overlap)));
		}
		spin_lock_irq(&sh->stripe_lock);
		sh->batch_head = NULL;
		spin_unlock_irq(&sh->stripe_lock);
		if (handle_flags == 0 ||
		    sh->state & handle_flags)
			set_bit(STRIPE_HANDLE, &sh->state);
		release_stripe(sh);
	}
	spin_lock_irq(&head_sh->stripe_lock);
	head_sh->batch_head = NULL;
	spin_unlock_irq(&head_sh->stripe_lock);
	for (i = 0; i < head_sh->disks; i++)
		if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
			do_wakeup = 1;
	if (head_sh->state & handle_flags)
		set_bit(STRIPE_HANDLE, &head_sh->state);

	if (do_wakeup)
		wake_up(&head_sh->raid_conf->wait_for_overlap);
}
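/*
 * handle_stripe() below is the per-stripe state machine.  In outline it:
 * analyses the stripe, fails requests when more than max_degraded devices
 * are gone, schedules reads, new writes and parity checks, services
 * expansion/reshape work, and finally submits the resulting operations via
 * raid_run_ops().  Anything it cannot complete now is flagged STRIPE_HANDLE
 * so it will be retried on a later pass.  (Editorial summary.)
 */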
4277 static void handle_stripe(struct stripe_head
*sh
)
4279 struct stripe_head_state s
;
4280 struct r5conf
*conf
= sh
->raid_conf
;
4283 int disks
= sh
->disks
;
4284 struct r5dev
*pdev
, *qdev
;
4286 clear_bit(STRIPE_HANDLE
, &sh
->state
);
4287 if (test_and_set_bit_lock(STRIPE_ACTIVE
, &sh
->state
)) {
4288 /* already being handled, ensure it gets handled
4289 * again when current action finishes */
4290 set_bit(STRIPE_HANDLE
, &sh
->state
);
4294 if (clear_batch_ready(sh
) ) {
4295 clear_bit_unlock(STRIPE_ACTIVE
, &sh
->state
);
4299 if (test_and_clear_bit(STRIPE_BATCH_ERR
, &sh
->state
))
4300 break_stripe_batch_list(sh
, 0);
4302 if (test_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
) && !sh
->batch_head
) {
4303 spin_lock(&sh
->stripe_lock
);
4304 /* Cannot process 'sync' concurrently with 'discard' */
4305 if (!test_bit(STRIPE_DISCARD
, &sh
->state
) &&
4306 test_and_clear_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
)) {
4307 set_bit(STRIPE_SYNCING
, &sh
->state
);
4308 clear_bit(STRIPE_INSYNC
, &sh
->state
);
4309 clear_bit(STRIPE_REPLACED
, &sh
->state
);
4311 spin_unlock(&sh
->stripe_lock
);
4313 clear_bit(STRIPE_DELAYED
, &sh
->state
);
4315 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4316 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
4317 (unsigned long long)sh
->sector
, sh
->state
,
4318 atomic_read(&sh
->count
), sh
->pd_idx
, sh
->qd_idx
,
4319 sh
->check_state
, sh
->reconstruct_state
);
4321 analyse_stripe(sh
, &s
);
4323 if (s
.handle_bad_blocks
) {
4324 set_bit(STRIPE_HANDLE
, &sh
->state
);
4328 if (unlikely(s
.blocked_rdev
)) {
4329 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
4330 s
.replacing
|| s
.to_write
|| s
.written
) {
4331 set_bit(STRIPE_HANDLE
, &sh
->state
);
4334 /* There is nothing for the blocked_rdev to block */
4335 rdev_dec_pending(s
.blocked_rdev
, conf
->mddev
);
4336 s
.blocked_rdev
= NULL
;
4339 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
4340 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
4341 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
4344 pr_debug("locked=%d uptodate=%d to_read=%d"
4345 " to_write=%d failed=%d failed_num=%d,%d\n",
4346 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
4347 s
.failed_num
[0], s
.failed_num
[1]);
4348 /* check if the array has lost more than max_degraded devices and,
4349 * if so, some requests might need to be failed.
4351 if (s
.failed
> conf
->max_degraded
) {
4352 sh
->check_state
= 0;
4353 sh
->reconstruct_state
= 0;
4354 break_stripe_batch_list(sh
, 0);
4355 if (s
.to_read
+s
.to_write
+s
.written
)
4356 handle_failed_stripe(conf
, sh
, &s
, disks
, &s
.return_bi
);
4357 if (s
.syncing
+ s
.replacing
)
4358 handle_failed_sync(conf
, sh
, &s
);
4361 /* Now we check to see if any write operations have recently
4365 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
)
4367 if (sh
->reconstruct_state
== reconstruct_state_drain_result
||
4368 sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
) {
4369 sh
->reconstruct_state
= reconstruct_state_idle
;
4371 /* All the 'written' buffers and the parity block are ready to
4372 * be written back to disk
4374 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
) &&
4375 !test_bit(R5_Discard
, &sh
->dev
[sh
->pd_idx
].flags
));
4376 BUG_ON(sh
->qd_idx
>= 0 &&
4377 !test_bit(R5_UPTODATE
, &sh
->dev
[sh
->qd_idx
].flags
) &&
4378 !test_bit(R5_Discard
, &sh
->dev
[sh
->qd_idx
].flags
));
4379 for (i
= disks
; i
--; ) {
4380 struct r5dev
*dev
= &sh
->dev
[i
];
4381 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
4382 (i
== sh
->pd_idx
|| i
== sh
->qd_idx
||
4384 pr_debug("Writing block %d\n", i
);
4385 set_bit(R5_Wantwrite
, &dev
->flags
);
4390 if (!test_bit(R5_Insync
, &dev
->flags
) ||
4391 ((i
== sh
->pd_idx
|| i
== sh
->qd_idx
) &&
4393 set_bit(STRIPE_INSYNC
, &sh
->state
);
4396 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
4397 s
.dec_preread_active
= 1;
4401 * might be able to return some write requests if the parity blocks
4402 * are safe, or on a failed drive
4404 pdev
= &sh
->dev
[sh
->pd_idx
];
4405 s
.p_failed
= (s
.failed
>= 1 && s
.failed_num
[0] == sh
->pd_idx
)
4406 || (s
.failed
>= 2 && s
.failed_num
[1] == sh
->pd_idx
);
4407 qdev
= &sh
->dev
[sh
->qd_idx
];
4408 s
.q_failed
= (s
.failed
>= 1 && s
.failed_num
[0] == sh
->qd_idx
)
4409 || (s
.failed
>= 2 && s
.failed_num
[1] == sh
->qd_idx
)
4413 (s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
4414 && !test_bit(R5_LOCKED
, &pdev
->flags
)
4415 && (test_bit(R5_UPTODATE
, &pdev
->flags
) ||
4416 test_bit(R5_Discard
, &pdev
->flags
))))) &&
4417 (s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
4418 && !test_bit(R5_LOCKED
, &qdev
->flags
)
4419 && (test_bit(R5_UPTODATE
, &qdev
->flags
) ||
4420 test_bit(R5_Discard
, &qdev
->flags
))))))
4421 handle_stripe_clean_event(conf
, sh
, disks
, &s
.return_bi
);
4423 /* Now we might consider reading some blocks, either to check/generate
4424 * parity, or to satisfy requests
4425 * or to load a block that is being partially written.
4427 if (s
.to_read
|| s
.non_overwrite
4428 || (conf
->level
== 6 && s
.to_write
&& s
.failed
)
4429 || (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
))
4432 handle_stripe_fill(sh
, &s
, disks
);
4434 /* Now to consider new write requests and what else, if anything
4435 * should be read. We do not handle new writes when:
4436 * 1/ A 'write' operation (copy+xor) is already in flight.
4437 * 2/ A 'check' operation is in flight, as it may clobber the parity
4440 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
4441 handle_stripe_dirtying(conf
, sh
, &s
, disks
);
4443 /* maybe we need to check and possibly fix the parity for this stripe
4444 * Any reads will already have been scheduled, so we just see if enough
4445 * data is available. The parity check is held off while parity
4446 * dependent operations are in flight.
4448 if (sh
->check_state
||
4449 (s
.syncing
&& s
.locked
== 0 &&
4450 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
4451 !test_bit(STRIPE_INSYNC
, &sh
->state
))) {
4452 if (conf
->level
== 6)
4453 handle_parity_checks6(conf
, sh
, &s
, disks
);
4455 handle_parity_checks5(conf
, sh
, &s
, disks
);
4458 if ((s
.replacing
|| s
.syncing
) && s
.locked
== 0
4459 && !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)
4460 && !test_bit(STRIPE_REPLACED
, &sh
->state
)) {
4461 /* Write out to replacement devices where possible */
4462 for (i
= 0; i
< conf
->raid_disks
; i
++)
4463 if (test_bit(R5_NeedReplace
, &sh
->dev
[i
].flags
)) {
4464 WARN_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
));
4465 set_bit(R5_WantReplace
, &sh
->dev
[i
].flags
);
4466 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
4470 set_bit(STRIPE_INSYNC
, &sh
->state
);
4471 set_bit(STRIPE_REPLACED
, &sh
->state
);
4473 if ((s
.syncing
|| s
.replacing
) && s
.locked
== 0 &&
4474 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
4475 test_bit(STRIPE_INSYNC
, &sh
->state
)) {
4476 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
4477 clear_bit(STRIPE_SYNCING
, &sh
->state
);
4478 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
))
4479 wake_up(&conf
->wait_for_overlap
);
4482 /* If the failed drives are just a ReadError, then we might need
4483 * to progress the repair/check process
4485 if (s
.failed
<= conf
->max_degraded
&& !conf
->mddev
->ro
)
4486 for (i
= 0; i
< s
.failed
; i
++) {
4487 struct r5dev
*dev
= &sh
->dev
[s
.failed_num
[i
]];
4488 if (test_bit(R5_ReadError
, &dev
->flags
)
4489 && !test_bit(R5_LOCKED
, &dev
->flags
)
4490 && test_bit(R5_UPTODATE
, &dev
->flags
)
4492 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
4493 set_bit(R5_Wantwrite
, &dev
->flags
);
4494 set_bit(R5_ReWrite
, &dev
->flags
);
4495 set_bit(R5_LOCKED
, &dev
->flags
);
4498 /* let's read it back */
4499 set_bit(R5_Wantread
, &dev
->flags
);
4500 set_bit(R5_LOCKED
, &dev
->flags
);
4506 /* Finish reconstruct operations initiated by the expansion process */
4507 if (sh
->reconstruct_state
== reconstruct_state_result
) {
4508 struct stripe_head
*sh_src
4509 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
4510 if (sh_src
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh_src
->state
)) {
4511 /* sh cannot be written until sh_src has been read.
4512 * so arrange for sh to be delayed a little
4514 set_bit(STRIPE_DELAYED
, &sh
->state
);
4515 set_bit(STRIPE_HANDLE
, &sh
->state
);
4516 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
4518 atomic_inc(&conf
->preread_active_stripes
);
4519 release_stripe(sh_src
);
4523 release_stripe(sh_src
);
4525 sh
->reconstruct_state
= reconstruct_state_idle
;
4526 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
4527 for (i
= conf
->raid_disks
; i
--; ) {
4528 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
4529 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
4534 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
4535 !sh
->reconstruct_state
) {
4536 /* Need to write out all blocks after computing parity */
4537 sh
->disks
= conf
->raid_disks
;
4538 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
4539 schedule_reconstruction(sh
, &s
, 1, 1);
4540 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
4541 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
4542 atomic_dec(&conf
->reshape_stripes
);
4543 wake_up(&conf
->wait_for_overlap
);
4544 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
4547 if (s
.expanding
&& s
.locked
== 0 &&
4548 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
4549 handle_stripe_expansion(conf
, sh
);
4552 /* wait for this device to become unblocked */
4553 if (unlikely(s
.blocked_rdev
)) {
4554 if (conf
->mddev
->external
)
4555 md_wait_for_blocked_rdev(s
.blocked_rdev
,
4558 /* Internal metadata will immediately
4559 * be written by raid5d, so we don't
4560 * need to wait here.
4562 rdev_dec_pending(s
.blocked_rdev
,
4566 if (s
.handle_bad_blocks
)
4567 for (i
= disks
; i
--; ) {
4568 struct md_rdev
*rdev
;
4569 struct r5dev
*dev
= &sh
->dev
[i
];
4570 if (test_and_clear_bit(R5_WriteError
, &dev
->flags
)) {
4571 /* We own a safe reference to the rdev */
4572 rdev
= conf
->disks
[i
].rdev
;
4573 if (!rdev_set_badblocks(rdev
, sh
->sector
,
4575 md_error(conf
->mddev
, rdev
);
4576 rdev_dec_pending(rdev
, conf
->mddev
);
4578 if (test_and_clear_bit(R5_MadeGood
, &dev
->flags
)) {
4579 rdev
= conf
->disks
[i
].rdev
;
4580 rdev_clear_badblocks(rdev
, sh
->sector
,
4582 rdev_dec_pending(rdev
, conf
->mddev
);
4584 if (test_and_clear_bit(R5_MadeGoodRepl
, &dev
->flags
)) {
4585 rdev
= conf
->disks
[i
].replacement
;
4587 /* rdev have been moved down */
4588 rdev
= conf
->disks
[i
].rdev
;
4589 rdev_clear_badblocks(rdev
, sh
->sector
,
4591 rdev_dec_pending(rdev
, conf
->mddev
);
4596 raid_run_ops(sh
, s
.ops_request
);
4600 if (s
.dec_preread_active
) {
4601 /* We delay this until after ops_run_io so that if make_request
4602 * is waiting on a flush, it won't continue until the writes
4603 * have actually been submitted.
4605 atomic_dec(&conf
->preread_active_stripes
);
4606 if (atomic_read(&conf
->preread_active_stripes
) <
4608 md_wakeup_thread(conf
->mddev
->thread
);
4611 if (!bio_list_empty(&s
.return_bi
)) {
4612 if (test_bit(MD_CHANGE_PENDING
, &conf
->mddev
->flags
)) {
4613 spin_lock_irq(&conf
->device_lock
);
4614 bio_list_merge(&conf
->return_bi
, &s
.return_bi
);
4615 spin_unlock_irq(&conf
->device_lock
);
4616 md_wakeup_thread(conf
->mddev
->thread
);
4618 return_io(&s
.return_bi
);
4621 clear_bit_unlock(STRIPE_ACTIVE
, &sh
->state
);
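/*
 * Delayed stripes (STRIPE_DELAYED) are parked on conf->delayed_list so that
 * preread activity stays below IO_THRESHOLD; raid5_activate_delayed() moves
 * them to the hold_list once the preread count has dropped again.
 */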
static void raid5_activate_delayed(struct r5conf *conf)
{
	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
		while (!list_empty(&conf->delayed_list)) {
			struct list_head *l = conf->delayed_list.next;
			struct stripe_head *sh;
			sh = list_entry(l, struct stripe_head, lru);
			list_del_init(l);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
				atomic_inc(&conf->preread_active_stripes);
			list_add_tail(&sh->lru, &conf->hold_list);
			raid5_wakeup_stripe_thread(sh);
		}
	}
}
static void activate_bit_delay(struct r5conf *conf,
	struct list_head *temp_inactive_list)
{
	/* device_lock is held */
	struct list_head head;
	list_add(&head, &conf->bitmap_list);
	list_del_init(&conf->bitmap_list);
	while (!list_empty(&head)) {
		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
		int hash;
		list_del_init(&sh->lru);
		atomic_inc(&sh->count);
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
	}
}
static int raid5_congested(struct mddev *mddev, int bits)
{
	struct r5conf *conf = mddev->private;

	/* No difference between reads and writes.  Just check
	 * how busy the stripe_cache is
	 */

	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
		return 1;
	if (atomic_read(&conf->empty_inactive_list_nr))
		return 1;

	return 0;
}
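/*
 * Example of the boundary test below: with chunk_sectors == 128 and a bio
 * starting at sector 120 with 16 sectors, (120 & 127) + 16 == 136 > 128,
 * so the bio crosses a chunk boundary and is not eligible for the
 * aligned-read fast path.
 */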
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
	struct r5conf *conf = mddev->private;
	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bio_sectors(bio);

	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
	return  chunk_sectors >=
		((sector & (chunk_sectors - 1)) + bio_sectors);
}
/*
 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
 *  later sampled by raid5d.
 */
static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);

	bi->bi_next = conf->retry_read_aligned_list;
	conf->retry_read_aligned_list = bi;

	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
	struct bio *bi;

	bi = conf->retry_read_aligned;
	if (bi) {
		conf->retry_read_aligned = NULL;
		return bi;
	}
	bi = conf->retry_read_aligned_list;
	if (bi) {
		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		/*
		 * this sets the active strip count to 1 and the processed
		 * strip count to zero (upper 8 bits)
		 */
		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
	}

	return bi;
}
/*
 *  The "raid5_align_endio" should check if the read succeeded and if it
 *  did, call bio_endio on the original bio (having bio_put the new bio
 *  first).
 *  If the read failed..
 */
static void raid5_align_endio(struct bio *bi)
{
	struct bio *raid_bi = bi->bi_private;
	struct mddev *mddev;
	struct r5conf *conf;
	struct md_rdev *rdev;
	int error = bi->bi_error;

	bio_put(bi);

	rdev = (void *)raid_bi->bi_next;
	raid_bi->bi_next = NULL;
	mddev = rdev->mddev;
	conf = mddev->private;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error) {
		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
					 raid_bi, 0);
		bio_endio(raid_bi);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_quiescent);
		return;
	}

	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
}
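/*
 * raid5_read_one_chunk() implements the "aligned read" fast path: a read
 * that fits inside one chunk on one in-sync device is cloned and sent
 * straight to that device, bypassing the stripe cache.  It returns 1 if the
 * cloned bio was issued, 0 if the caller must fall back to the normal path.
 */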
static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
{
	struct r5conf *conf = mddev->private;
	int dd_idx;
	struct bio *align_bi;
	struct md_rdev *rdev;
	sector_t end_sector;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("%s: non aligned\n", __func__);
		return 0;
	}
	/*
	 * use bio_clone_mddev to make a copy of the bio
	 */
	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
	if (!align_bi)
		return 0;
	/*
	 *   set bi_end_io to a new function, and set bi_private to the
	 *     original bio.
	 */
	align_bi->bi_end_io  = raid5_align_endio;
	align_bi->bi_private = raid_bio;
	/*
	 *	compute position
	 */
	align_bi->bi_iter.bi_sector =
		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
				     0, &dd_idx, NULL);

	end_sector = bio_end_sector(align_bi);
	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags) ||
	    rdev->recovery_offset < end_sector) {
		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
		if (rdev &&
		    (test_bit(Faulty, &rdev->flags) ||
		    !(test_bit(In_sync, &rdev->flags) ||
		      rdev->recovery_offset >= end_sector)))
			rdev = NULL;
	}
	if (rdev) {
		sector_t first_bad;
		int bad_sectors;

		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		raid_bio->bi_next = (void *)rdev;
		align_bi->bi_bdev = rdev->bdev;
		bio_clear_flag(align_bi, BIO_SEG_VALID);

		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
				bio_sectors(align_bi),
				&first_bad, &bad_sectors)) {
			bio_put(align_bi);
			rdev_dec_pending(rdev, mddev);
			return 0;
		}

		/* No reshape active, so we can trust rdev->data_offset */
		align_bi->bi_iter.bi_sector += rdev->data_offset;

		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_quiescent,
				    conf->quiesce == 0,
				    conf->device_lock);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);

		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
					      align_bi, disk_devt(mddev->gendisk),
					      raid_bio->bi_iter.bi_sector);
		generic_make_request(align_bi);
		return 1;
	} else {
		rcu_read_unlock();
		bio_put(align_bi);
		return 0;
	}
}
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
	struct bio *split;

	do {
		sector_t sector = raid_bio->bi_iter.bi_sector;
		unsigned chunk_sects = mddev->chunk_sectors;
		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));

		if (sectors < bio_sectors(raid_bio)) {
			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, raid_bio);
		} else
			split = raid_bio;

		if (!raid5_read_one_chunk(mddev, split)) {
			if (split != raid_bio)
				generic_make_request(raid_bio);
			return split;
		}
	} while (split != raid_bio);

	return NULL;
}
/* __get_priority_stripe - get the next stripe to process
 *
 * Full stripe writes are allowed to pass preread active stripes up until
 * the bypass_threshold is exceeded. In general the bypass_count
 * increments when the handle_list is handled before the hold_list; however, it
 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
 * stripe with in flight i/o. The bypass_count will be reset when the
 * head of the hold_list has changed, i.e. the head was promoted to the
 * handle_list.
 */
static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
{
	struct stripe_head *sh = NULL, *tmp;
	struct list_head *handle_list = NULL;
	struct r5worker_group *wg = NULL;

	if (conf->worker_cnt_per_group == 0) {
		handle_list = &conf->handle_list;
	} else if (group != ANY_GROUP) {
		handle_list = &conf->worker_groups[group].handle_list;
		wg = &conf->worker_groups[group];
	} else {
		int i;
		for (i = 0; i < conf->group_cnt; i++) {
			handle_list = &conf->worker_groups[i].handle_list;
			wg = &conf->worker_groups[i];
			if (!list_empty(handle_list))
				break;
		}
	}

	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
		  __func__,
		  list_empty(handle_list) ? "empty" : "busy",
		  list_empty(&conf->hold_list) ? "empty" : "busy",
		  atomic_read(&conf->pending_full_writes), conf->bypass_count);

	if (!list_empty(handle_list)) {
		sh = list_entry(handle_list->next, typeof(*sh), lru);

		if (list_empty(&conf->hold_list))
			conf->bypass_count = 0;
		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
			if (conf->hold_list.next == conf->last_hold)
				conf->bypass_count++;
			else {
				conf->last_hold = conf->hold_list.next;
				conf->bypass_count -= conf->bypass_threshold;
				if (conf->bypass_count < 0)
					conf->bypass_count = 0;
			}
		}
	} else if (!list_empty(&conf->hold_list) &&
		   ((conf->bypass_threshold &&
		     conf->bypass_count > conf->bypass_threshold) ||
		    atomic_read(&conf->pending_full_writes) == 0)) {

		list_for_each_entry(tmp, &conf->hold_list, lru) {
			if (conf->worker_cnt_per_group == 0 ||
			    group == ANY_GROUP ||
			    !cpu_online(tmp->cpu) ||
			    cpu_to_group(tmp->cpu) == group) {
				sh = tmp;
				break;
			}
		}

		if (sh) {
			conf->bypass_count -= conf->bypass_threshold;
			if (conf->bypass_count < 0)
				conf->bypass_count = 0;
			wg = NULL;
		}
	}

	if (!sh)
		return NULL;

	if (wg) {
		wg->stripes_cnt--;
		sh->group = NULL;
	}
	list_del_init(&sh->lru);
	BUG_ON(atomic_inc_return(&sh->count) != 1);
	return sh;
}
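/*
 * Per-task plugging: stripes released while a blk_plug is active are
 * collected on a raid5_plug_cb list and only pushed back onto the conf
 * lists in raid5_unplug(), which batches the per-hash-lock work.
 */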
struct raid5_plug_cb {
	struct blk_plug_cb	cb;
	struct list_head	list;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
};

static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
{
	struct raid5_plug_cb *cb = container_of(
		blk_cb, struct raid5_plug_cb, cb);
	struct stripe_head *sh;
	struct mddev *mddev = cb->cb.data;
	struct r5conf *conf = mddev->private;
	int cnt = 0;
	int hash;

	if (cb->list.next && !list_empty(&cb->list)) {
		spin_lock_irq(&conf->device_lock);
		while (!list_empty(&cb->list)) {
			sh = list_first_entry(&cb->list, struct stripe_head, lru);
			list_del_init(&sh->lru);
			/*
			 * avoid race release_stripe_plug() sees
			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
			 * is still in our list
			 */
			smp_mb__before_atomic();
			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
			/*
			 * STRIPE_ON_RELEASE_LIST could be set here. In that
			 * case, the count is always > 1 here
			 */
			hash = sh->hash_lock_index;
			__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
			cnt++;
		}
		spin_unlock_irq(&conf->device_lock);
	}
	release_inactive_stripe_list(conf, cb->temp_inactive_list,
				     NR_STRIPE_HASH_LOCKS);
	if (mddev->queue)
		trace_block_unplug(mddev->queue, cnt, !from_schedule);
	kfree(cb);
}
static void release_stripe_plug(struct mddev *mddev,
				struct stripe_head *sh)
{
	struct blk_plug_cb *blk_cb = blk_check_plugged(
		raid5_unplug, mddev,
		sizeof(struct raid5_plug_cb));
	struct raid5_plug_cb *cb;

	if (!blk_cb) {
		release_stripe(sh);
		return;
	}

	cb = container_of(blk_cb, struct raid5_plug_cb, cb);

	if (cb->list.next == NULL) {
		int i;
		INIT_LIST_HEAD(&cb->list);
		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
			INIT_LIST_HEAD(cb->temp_inactive_list + i);
	}

	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
		list_add_tail(&sh->lru, &cb->list);
	else
		release_stripe(sh);
}
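/*
 * make_discard_request() handles REQ_DISCARD bios.  The range is trimmed
 * inward to whole data stripes (sectors that do not cover a full stripe are
 * simply not discarded) and each covered stripe has all of its data blocks
 * marked for overwrite, skipping the parity disks pd_idx/qd_idx.
 */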
5034 static void make_discard_request(struct mddev
*mddev
, struct bio
*bi
)
5036 struct r5conf
*conf
= mddev
->private;
5037 sector_t logical_sector
, last_sector
;
5038 struct stripe_head
*sh
;
5042 if (mddev
->reshape_position
!= MaxSector
)
5043 /* Skip discard while reshape is happening */
5046 logical_sector
= bi
->bi_iter
.bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
5047 last_sector
= bi
->bi_iter
.bi_sector
+ (bi
->bi_iter
.bi_size
>>9);
5050 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
5052 stripe_sectors
= conf
->chunk_sectors
*
5053 (conf
->raid_disks
- conf
->max_degraded
);
5054 logical_sector
= DIV_ROUND_UP_SECTOR_T(logical_sector
,
5056 sector_div(last_sector
, stripe_sectors
);
5058 logical_sector
*= conf
->chunk_sectors
;
5059 last_sector
*= conf
->chunk_sectors
;
5061 for (; logical_sector
< last_sector
;
5062 logical_sector
+= STRIPE_SECTORS
) {
5066 sh
= get_active_stripe(conf
, logical_sector
, 0, 0, 0);
5067 prepare_to_wait(&conf
->wait_for_overlap
, &w
,
5068 TASK_UNINTERRUPTIBLE
);
5069 set_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
);
5070 if (test_bit(STRIPE_SYNCING
, &sh
->state
)) {
5075 clear_bit(R5_Overlap
, &sh
->dev
[sh
->pd_idx
].flags
);
5076 spin_lock_irq(&sh
->stripe_lock
);
5077 for (d
= 0; d
< conf
->raid_disks
; d
++) {
5078 if (d
== sh
->pd_idx
|| d
== sh
->qd_idx
)
5080 if (sh
->dev
[d
].towrite
|| sh
->dev
[d
].toread
) {
5081 set_bit(R5_Overlap
, &sh
->dev
[d
].flags
);
5082 spin_unlock_irq(&sh
->stripe_lock
);
5088 set_bit(STRIPE_DISCARD
, &sh
->state
);
5089 finish_wait(&conf
->wait_for_overlap
, &w
);
5090 sh
->overwrite_disks
= 0;
5091 for (d
= 0; d
< conf
->raid_disks
; d
++) {
5092 if (d
== sh
->pd_idx
|| d
== sh
->qd_idx
)
5094 sh
->dev
[d
].towrite
= bi
;
5095 set_bit(R5_OVERWRITE
, &sh
->dev
[d
].flags
);
5096 raid5_inc_bi_active_stripes(bi
);
5097 sh
->overwrite_disks
++;
5099 spin_unlock_irq(&sh
->stripe_lock
);
5100 if (conf
->mddev
->bitmap
) {
5102 d
< conf
->raid_disks
- conf
->max_degraded
;
5104 bitmap_startwrite(mddev
->bitmap
,
5108 sh
->bm_seq
= conf
->seq_flush
+ 1;
5109 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
5112 set_bit(STRIPE_HANDLE
, &sh
->state
);
5113 clear_bit(STRIPE_DELAYED
, &sh
->state
);
5114 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
5115 atomic_inc(&conf
->preread_active_stripes
);
5116 release_stripe_plug(mddev
, sh
);
5119 remaining
= raid5_dec_bi_active_stripes(bi
);
5120 if (remaining
== 0) {
5121 md_write_end(mddev
);
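/*
 * make_request() is the main entry point for normal reads and writes: it
 * splits the bio into STRIPE_SECTORS-sized pieces, maps each piece to its
 * stripe with raid5_compute_sector(), attaches it with add_stripe_bio() and
 * queues the stripe for handling, retrying when it races with an ongoing
 * reshape or an overlapping request.
 */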
5126 static void make_request(struct mddev
*mddev
, struct bio
* bi
)
5128 struct r5conf
*conf
= mddev
->private;
5130 sector_t new_sector
;
5131 sector_t logical_sector
, last_sector
;
5132 struct stripe_head
*sh
;
5133 const int rw
= bio_data_dir(bi
);
5138 if (unlikely(bi
->bi_rw
& REQ_FLUSH
)) {
5139 md_flush_request(mddev
, bi
);
5143 md_write_start(mddev
, bi
);
5146 * If array is degraded, better not do chunk aligned read because
5147 * later we might have to read it again in order to reconstruct
5148 * data on failed drives.
5150 if (rw
== READ
&& mddev
->degraded
== 0 &&
5151 mddev
->reshape_position
== MaxSector
) {
5152 bi
= chunk_aligned_read(mddev
, bi
);
5157 if (unlikely(bi
->bi_rw
& REQ_DISCARD
)) {
5158 make_discard_request(mddev
, bi
);
5162 logical_sector
= bi
->bi_iter
.bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
5163 last_sector
= bio_end_sector(bi
);
5165 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
5167 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
5168 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
5174 seq
= read_seqcount_begin(&conf
->gen_lock
);
5177 prepare_to_wait(&conf
->wait_for_overlap
, &w
,
5178 TASK_UNINTERRUPTIBLE
);
5179 if (unlikely(conf
->reshape_progress
!= MaxSector
)) {
5180 /* spinlock is needed as reshape_progress may be
5181 * 64bit on a 32bit platform, and so it might be
5182 * possible to see a half-updated value
5183 * Of course reshape_progress could change after
5184 * the lock is dropped, so once we get a reference
5185 * to the stripe that we think it is, we will have
5188 spin_lock_irq(&conf
->device_lock
);
5189 if (mddev
->reshape_backwards
5190 ? logical_sector
< conf
->reshape_progress
5191 : logical_sector
>= conf
->reshape_progress
) {
5194 if (mddev
->reshape_backwards
5195 ? logical_sector
< conf
->reshape_safe
5196 : logical_sector
>= conf
->reshape_safe
) {
5197 spin_unlock_irq(&conf
->device_lock
);
5203 spin_unlock_irq(&conf
->device_lock
);
5206 new_sector
= raid5_compute_sector(conf
, logical_sector
,
5209 pr_debug("raid456: make_request, sector %llu logical %llu\n",
5210 (unsigned long long)new_sector
,
5211 (unsigned long long)logical_sector
);
5213 sh
= get_active_stripe(conf
, new_sector
, previous
,
5214 (bi
->bi_rw
&RWA_MASK
), 0);
5216 if (unlikely(previous
)) {
5217 /* expansion might have moved on while waiting for a
5218 * stripe, so we must do the range check again.
5219 * Expansion could still move past after this
5220 * test, but as we are holding a reference to
5221 * 'sh', we know that if that happens,
5222 * STRIPE_EXPANDING will get set and the expansion
5223 * won't proceed until we finish with the stripe.
5226 spin_lock_irq(&conf
->device_lock
);
5227 if (mddev
->reshape_backwards
5228 ? logical_sector
>= conf
->reshape_progress
5229 : logical_sector
< conf
->reshape_progress
)
5230 /* mismatch, need to try again */
5232 spin_unlock_irq(&conf
->device_lock
);
5240 if (read_seqcount_retry(&conf
->gen_lock
, seq
)) {
5241 /* Might have got the wrong stripe_head
5249 logical_sector
>= mddev
->suspend_lo
&&
5250 logical_sector
< mddev
->suspend_hi
) {
5252 /* As the suspend_* range is controlled by
5253 * userspace, we want an interruptible
5256 flush_signals(current
);
5257 prepare_to_wait(&conf
->wait_for_overlap
,
5258 &w
, TASK_INTERRUPTIBLE
);
5259 if (logical_sector
>= mddev
->suspend_lo
&&
5260 logical_sector
< mddev
->suspend_hi
) {
5267 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
5268 !add_stripe_bio(sh
, bi
, dd_idx
, rw
, previous
)) {
5269 /* Stripe is busy expanding or
5270 * add failed due to overlap. Flush everything
5273 md_wakeup_thread(mddev
->thread
);
5279 set_bit(STRIPE_HANDLE
, &sh
->state
);
5280 clear_bit(STRIPE_DELAYED
, &sh
->state
);
5281 if ((!sh
->batch_head
|| sh
== sh
->batch_head
) &&
5282 (bi
->bi_rw
& REQ_SYNC
) &&
5283 !test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
5284 atomic_inc(&conf
->preread_active_stripes
);
5285 release_stripe_plug(mddev
, sh
);
5287 /* cannot get stripe for read-ahead, just give-up */
5288 bi
->bi_error
= -EIO
;
5292 finish_wait(&conf
->wait_for_overlap
, &w
);
5294 remaining
= raid5_dec_bi_active_stripes(bi
);
5295 if (remaining
== 0) {
5298 md_write_end(mddev
);
5300 trace_block_bio_complete(bdev_get_queue(bi
->bi_bdev
),
5306 static sector_t
raid5_size(struct mddev
*mddev
, sector_t sectors
, int raid_disks
);
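/*
 * reshape_request() is called from the resync machinery while the array is
 * being reshaped; the long comment inside describes how destination stripes
 * are expanded, how the matching source stripes are read, and how often the
 * checkpoint (reshape_safe/reshape_position) must be written to the
 * superblock.
 */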
5308 static sector_t
reshape_request(struct mddev
*mddev
, sector_t sector_nr
, int *skipped
)
5310 /* reshaping is quite different to recovery/resync so it is
5311 * handled quite separately ... here.
5313 * On each call to sync_request, we gather one chunk worth of
5314 * destination stripes and flag them as expanding.
5315 * Then we find all the source stripes and request reads.
5316 * As the reads complete, handle_stripe will copy the data
5317 * into the destination stripe and release that stripe.
5319 struct r5conf
*conf
= mddev
->private;
5320 struct stripe_head
*sh
;
5321 sector_t first_sector
, last_sector
;
5322 int raid_disks
= conf
->previous_raid_disks
;
5323 int data_disks
= raid_disks
- conf
->max_degraded
;
5324 int new_data_disks
= conf
->raid_disks
- conf
->max_degraded
;
5327 sector_t writepos
, readpos
, safepos
;
5328 sector_t stripe_addr
;
5329 int reshape_sectors
;
5330 struct list_head stripes
;
5333 if (sector_nr
== 0) {
5334 /* If restarting in the middle, skip the initial sectors */
5335 if (mddev
->reshape_backwards
&&
5336 conf
->reshape_progress
< raid5_size(mddev
, 0, 0)) {
5337 sector_nr
= raid5_size(mddev
, 0, 0)
5338 - conf
->reshape_progress
;
5339 } else if (mddev
->reshape_backwards
&&
5340 conf
->reshape_progress
== MaxSector
) {
5341 /* shouldn't happen, but just in case, finish up.*/
5342 sector_nr
= MaxSector
;
5343 } else if (!mddev
->reshape_backwards
&&
5344 conf
->reshape_progress
> 0)
5345 sector_nr
= conf
->reshape_progress
;
5346 sector_div(sector_nr
, new_data_disks
);
5348 mddev
->curr_resync_completed
= sector_nr
;
5349 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
5356 /* We need to process a full chunk at a time.
5357 * If old and new chunk sizes differ, we need to process the
5361 reshape_sectors
= max(conf
->chunk_sectors
, conf
->prev_chunk_sectors
);
5363 /* We update the metadata at least every 10 seconds, or when
5364 * the data about to be copied would over-write the source of
5365 * the data at the front of the range. i.e. one new_stripe
5366 * along from reshape_progress new_maps to after where
5367 * reshape_safe old_maps to
5369 writepos
= conf
->reshape_progress
;
5370 sector_div(writepos
, new_data_disks
);
5371 readpos
= conf
->reshape_progress
;
5372 sector_div(readpos
, data_disks
);
5373 safepos
= conf
->reshape_safe
;
5374 sector_div(safepos
, data_disks
);
5375 if (mddev
->reshape_backwards
) {
5376 BUG_ON(writepos
< reshape_sectors
);
5377 writepos
-= reshape_sectors
;
5378 readpos
+= reshape_sectors
;
5379 safepos
+= reshape_sectors
;
5381 writepos
+= reshape_sectors
;
5382 /* readpos and safepos are worst-case calculations.
5383 * A negative number is overly pessimistic, and causes
5384 * obvious problems for unsigned storage. So clip to 0.
5386 readpos
-= min_t(sector_t
, reshape_sectors
, readpos
);
5387 safepos
-= min_t(sector_t
, reshape_sectors
, safepos
);
5390 /* Having calculated the 'writepos' possibly use it
5391 * to set 'stripe_addr' which is where we will write to.
5393 if (mddev
->reshape_backwards
) {
5394 BUG_ON(conf
->reshape_progress
== 0);
5395 stripe_addr
= writepos
;
5396 BUG_ON((mddev
->dev_sectors
&
5397 ~((sector_t
)reshape_sectors
- 1))
5398 - reshape_sectors
- stripe_addr
5401 BUG_ON(writepos
!= sector_nr
+ reshape_sectors
);
5402 stripe_addr
= sector_nr
;
5405 /* 'writepos' is the most advanced device address we might write.
5406 * 'readpos' is the least advanced device address we might read.
5407 * 'safepos' is the least address recorded in the metadata as having
5409 * If there is a min_offset_diff, these are adjusted either by
5410 * increasing the safepos/readpos if diff is negative, or
5411 * increasing writepos if diff is positive.
5412 * If 'readpos' is then behind 'writepos', there is no way that we can
5413 * ensure safety in the face of a crash - that must be done by userspace
5414 * making a backup of the data. So in that case there is no particular
5415 * rush to update metadata.
5416 * Otherwise if 'safepos' is behind 'writepos', then we really need to
5417 * update the metadata to advance 'safepos' to match 'readpos' so that
5418 * we can be safe in the event of a crash.
5419 * So we insist on updating metadata if safepos is behind writepos and
5420 * readpos is beyond writepos.
5421 * In any case, update the metadata every 10 seconds.
5422 * Maybe that number should be configurable, but I'm not sure it is
5423 * worth it.... maybe it could be a multiple of safemode_delay???
5425 if (conf
->min_offset_diff
< 0) {
5426 safepos
+= -conf
->min_offset_diff
;
5427 readpos
+= -conf
->min_offset_diff
;
5429 writepos
+= conf
->min_offset_diff
;
5431 if ((mddev
->reshape_backwards
5432 ? (safepos
> writepos
&& readpos
< writepos
)
5433 : (safepos
< writepos
&& readpos
> writepos
)) ||
5434 time_after(jiffies
, conf
->reshape_checkpoint
+ 10*HZ
)) {
5435 /* Cannot proceed until we've updated the superblock... */
5436 wait_event(conf
->wait_for_overlap
,
5437 atomic_read(&conf
->reshape_stripes
)==0
5438 || test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
5439 if (atomic_read(&conf
->reshape_stripes
) != 0)
5441 mddev
->reshape_position
= conf
->reshape_progress
;
5442 mddev
->curr_resync_completed
= sector_nr
;
5443 conf
->reshape_checkpoint
= jiffies
;
5444 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
5445 md_wakeup_thread(mddev
->thread
);
5446 wait_event(mddev
->sb_wait
, mddev
->flags
== 0 ||
5447 test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
5448 if (test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
))
5450 spin_lock_irq(&conf
->device_lock
);
5451 conf
->reshape_safe
= mddev
->reshape_position
;
5452 spin_unlock_irq(&conf
->device_lock
);
5453 wake_up(&conf
->wait_for_overlap
);
5454 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
5457 INIT_LIST_HEAD(&stripes
);
5458 for (i
= 0; i
< reshape_sectors
; i
+= STRIPE_SECTORS
) {
5460 int skipped_disk
= 0;
5461 sh
= get_active_stripe(conf
, stripe_addr
+i
, 0, 0, 1);
5462 set_bit(STRIPE_EXPANDING
, &sh
->state
);
5463 atomic_inc(&conf
->reshape_stripes
);
5464 /* If any of this stripe is beyond the end of the old
5465 * array, then we need to zero those blocks
5467 for (j
=sh
->disks
; j
--;) {
5469 if (j
== sh
->pd_idx
)
5471 if (conf
->level
== 6 &&
5474 s
= compute_blocknr(sh
, j
, 0);
5475 if (s
< raid5_size(mddev
, 0, 0)) {
5479 memset(page_address(sh
->dev
[j
].page
), 0, STRIPE_SIZE
);
5480 set_bit(R5_Expanded
, &sh
->dev
[j
].flags
);
5481 set_bit(R5_UPTODATE
, &sh
->dev
[j
].flags
);
5483 if (!skipped_disk
) {
5484 set_bit(STRIPE_EXPAND_READY
, &sh
->state
);
5485 set_bit(STRIPE_HANDLE
, &sh
->state
);
5487 list_add(&sh
->lru
, &stripes
);
5489 spin_lock_irq(&conf
->device_lock
);
5490 if (mddev
->reshape_backwards
)
5491 conf
->reshape_progress
-= reshape_sectors
* new_data_disks
;
5493 conf
->reshape_progress
+= reshape_sectors
* new_data_disks
;
5494 spin_unlock_irq(&conf
->device_lock
);
5495 /* Ok, those stripe are ready. We can start scheduling
5496 * reads on the source stripes.
5497 * The source stripes are determined by mapping the first and last
5498 * block on the destination stripes.
5501 raid5_compute_sector(conf
, stripe_addr
*(new_data_disks
),
5504 raid5_compute_sector(conf
, ((stripe_addr
+reshape_sectors
)
5505 * new_data_disks
- 1),
5507 if (last_sector
>= mddev
->dev_sectors
)
5508 last_sector
= mddev
->dev_sectors
- 1;
5509 while (first_sector
<= last_sector
) {
5510 sh
= get_active_stripe(conf
, first_sector
, 1, 0, 1);
5511 set_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
5512 set_bit(STRIPE_HANDLE
, &sh
->state
);
5514 first_sector
+= STRIPE_SECTORS
;
5516 /* Now that the sources are clearly marked, we can release
5517 * the destination stripes
5519 while (!list_empty(&stripes
)) {
5520 sh
= list_entry(stripes
.next
, struct stripe_head
, lru
);
5521 list_del_init(&sh
->lru
);
5524 /* If this takes us to the resync_max point where we have to pause,
5525 * then we need to write out the superblock.
5527 sector_nr
+= reshape_sectors
;
5528 retn
= reshape_sectors
;
5530 if (mddev
->curr_resync_completed
> mddev
->resync_max
||
5531 (sector_nr
- mddev
->curr_resync_completed
) * 2
5532 >= mddev
->resync_max
- mddev
->curr_resync_completed
) {
5533 /* Cannot proceed until we've updated the superblock... */
5534 wait_event(conf
->wait_for_overlap
,
5535 atomic_read(&conf
->reshape_stripes
) == 0
5536 || test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
5537 if (atomic_read(&conf
->reshape_stripes
) != 0)
5539 mddev
->reshape_position
= conf
->reshape_progress
;
5540 mddev
->curr_resync_completed
= sector_nr
;
5541 conf
->reshape_checkpoint
= jiffies
;
5542 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
5543 md_wakeup_thread(mddev
->thread
);
5544 wait_event(mddev
->sb_wait
,
5545 !test_bit(MD_CHANGE_DEVS
, &mddev
->flags
)
5546 || test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
));
5547 if (test_bit(MD_RECOVERY_INTR
, &mddev
->recovery
))
5549 spin_lock_irq(&conf
->device_lock
);
5550 conf
->reshape_safe
= mddev
->reshape_position
;
5551 spin_unlock_irq(&conf
->device_lock
);
5552 wake_up(&conf
->wait_for_overlap
);
5553 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
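/*
 * sync_request() performs one step of resync/recovery: it either delegates
 * to reshape_request(), skips ahead using the write-intent bitmap, or marks
 * a single stripe STRIPE_SYNC_REQUESTED and hands it to handle_stripe().
 * It returns the number of sectors it has dealt with.
 */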
5559 static inline sector_t
sync_request(struct mddev
*mddev
, sector_t sector_nr
, int *skipped
)
5561 struct r5conf
*conf
= mddev
->private;
5562 struct stripe_head
*sh
;
5563 sector_t max_sector
= mddev
->dev_sectors
;
5564 sector_t sync_blocks
;
5565 int still_degraded
= 0;
5568 if (sector_nr
>= max_sector
) {
5569 /* just being told to finish up .. nothing much to do */
5571 if (test_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
)) {
5576 if (mddev
->curr_resync
< max_sector
) /* aborted */
5577 bitmap_end_sync(mddev
->bitmap
, mddev
->curr_resync
,
5579 else /* completed sync */
5581 bitmap_close_sync(mddev
->bitmap
);
5586 /* Allow raid5_quiesce to complete */
5587 wait_event(conf
->wait_for_overlap
, conf
->quiesce
!= 2);
5589 if (test_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
))
5590 return reshape_request(mddev
, sector_nr
, skipped
);
5592 /* No need to check resync_max as we never do more than one
5593 * stripe, and as resync_max will always be on a chunk boundary,
5594 * if the check in md_do_sync didn't fire, there is no chance
5595 * of overstepping resync_max here
5598 /* if there is too many failed drives and we are trying
5599 * to resync, then assert that we are finished, because there is
5600 * nothing we can do.
5602 if (mddev
->degraded
>= conf
->max_degraded
&&
5603 test_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
)) {
5604 sector_t rv
= mddev
->dev_sectors
- sector_nr
;
5608 if (!test_bit(MD_RECOVERY_REQUESTED
, &mddev
->recovery
) &&
5610 !bitmap_start_sync(mddev
->bitmap
, sector_nr
, &sync_blocks
, 1) &&
5611 sync_blocks
>= STRIPE_SECTORS
) {
5612 /* we can skip this block, and probably more */
5613 sync_blocks
/= STRIPE_SECTORS
;
5615 return sync_blocks
* STRIPE_SECTORS
; /* keep things rounded to whole stripes */
5618 bitmap_cond_end_sync(mddev
->bitmap
, sector_nr
);
5620 sh
= get_active_stripe(conf
, sector_nr
, 0, 1, 0);
5622 sh
= get_active_stripe(conf
, sector_nr
, 0, 0, 0);
5623 /* make sure we don't swamp the stripe cache if someone else
5624 * is trying to get access
5626 schedule_timeout_uninterruptible(1);
5628 /* Need to check if array will still be degraded after recovery/resync
5629 * Note in case of > 1 drive failures it's possible we're rebuilding
5630 * one drive while leaving another faulty drive in array.
5633 for (i
= 0; i
< conf
->raid_disks
; i
++) {
5634 struct md_rdev
*rdev
= ACCESS_ONCE(conf
->disks
[i
].rdev
);
5636 if (rdev
== NULL
|| test_bit(Faulty
, &rdev
->flags
))
5641 bitmap_start_sync(mddev
->bitmap
, sector_nr
, &sync_blocks
, still_degraded
);
5643 set_bit(STRIPE_SYNC_REQUESTED
, &sh
->state
);
5644 set_bit(STRIPE_HANDLE
, &sh
->state
);
5648 return STRIPE_SECTORS
;
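/*
 * retry_aligned_read() replays a chunk-aligned read through the stripe
 * cache, one STRIPE_SECTORS piece at a time, after the fast path in
 * raid5_read_one_chunk() failed; progress is remembered in the bio so the
 * retry can resume where it left off when stripe_heads are scarce.
 */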
5651 static int retry_aligned_read(struct r5conf
*conf
, struct bio
*raid_bio
)
5653 /* We may not be able to submit a whole bio at once as there
5654 * may not be enough stripe_heads available.
5655 * We cannot pre-allocate enough stripe_heads as we may need
5656 * more than exist in the cache (if we allow ever large chunks).
5657 * So we do one stripe head at a time and record in
5658 * ->bi_hw_segments how many have been done.
5660 * We *know* that this entire raid_bio is in one chunk, so
5661 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
5663 struct stripe_head
*sh
;
5665 sector_t sector
, logical_sector
, last_sector
;
5670 logical_sector
= raid_bio
->bi_iter
.bi_sector
&
5671 ~((sector_t
)STRIPE_SECTORS
-1);
5672 sector
= raid5_compute_sector(conf
, logical_sector
,
5674 last_sector
= bio_end_sector(raid_bio
);
5676 for (; logical_sector
< last_sector
;
5677 logical_sector
+= STRIPE_SECTORS
,
5678 sector
+= STRIPE_SECTORS
,
5681 if (scnt
< raid5_bi_processed_stripes(raid_bio
))
5682 /* already done this stripe */
5685 sh
= get_active_stripe(conf
, sector
, 0, 1, 1);
5688 /* failed to get a stripe - must wait */
5689 raid5_set_bi_processed_stripes(raid_bio
, scnt
);
5690 conf
->retry_read_aligned
= raid_bio
;
5694 if (!add_stripe_bio(sh
, raid_bio
, dd_idx
, 0, 0)) {
5696 raid5_set_bi_processed_stripes(raid_bio
, scnt
);
5697 conf
->retry_read_aligned
= raid_bio
;
5701 set_bit(R5_ReadNoMerge
, &sh
->dev
[dd_idx
].flags
);
5706 remaining
= raid5_dec_bi_active_stripes(raid_bio
);
5707 if (remaining
== 0) {
5708 trace_block_bio_complete(bdev_get_queue(raid_bio
->bi_bdev
),
5710 bio_endio(raid_bio
);
5712 if (atomic_dec_and_test(&conf
->active_aligned_reads
))
5713 wake_up(&conf
->wait_for_quiescent
);
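/*
 * handle_active_stripes() pulls up to MAX_STRIPE_BATCH stripes off the
 * priority lists, drops device_lock while it runs handle_stripe() on the
 * batch, and then releases them back to the per-hash inactive lists.  It
 * returns the batch size so callers can tell when there is nothing left.
 */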
static int handle_active_stripes(struct r5conf *conf, int group,
				 struct r5worker *worker,
				 struct list_head *temp_inactive_list)
{
	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
	int i, batch_size = 0, hash;
	bool release_inactive = false;

	while (batch_size < MAX_STRIPE_BATCH &&
			(sh = __get_priority_stripe(conf, group)) != NULL)
		batch[batch_size++] = sh;

	if (batch_size == 0) {
		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
			if (!list_empty(temp_inactive_list + i))
				break;
		if (i == NR_STRIPE_HASH_LOCKS)
			return batch_size;
		release_inactive = true;
	}
	spin_unlock_irq(&conf->device_lock);

	release_inactive_stripe_list(conf, temp_inactive_list,
				     NR_STRIPE_HASH_LOCKS);

	if (release_inactive) {
		spin_lock_irq(&conf->device_lock);
		return 0;
	}

	for (i = 0; i < batch_size; i++)
		handle_stripe(batch[i]);

	spin_lock_irq(&conf->device_lock);
	for (i = 0; i < batch_size; i++) {
		hash = batch[i]->hash_lock_index;
		__release_stripe(conf, batch[i], &temp_inactive_list[hash]);
	}
	return batch_size;
}
static void raid5_do_work(struct work_struct *work)
{
	struct r5worker *worker = container_of(work, struct r5worker, work);
	struct r5worker_group *group = worker->group;
	struct r5conf *conf = group->conf;
	int group_id = group - conf->worker_groups;
	int handled;
	struct blk_plug plug;

	pr_debug("+++ raid5worker active\n");

	blk_start_plug(&plug);
	handled = 0;
	spin_lock_irq(&conf->device_lock);
	while (1) {
		int batch_size, released;

		released = release_stripe_list(conf, worker->temp_inactive_list);

		batch_size = handle_active_stripes(conf, group_id, worker,
						   worker->temp_inactive_list);
		worker->working = false;
		if (!batch_size && !released)
			break;
		handled += batch_size;
	}
	pr_debug("%d stripes handled\n", handled);

	spin_unlock_irq(&conf->device_lock);
	blk_finish_plug(&plug);

	pr_debug("--- raid5worker inactive\n");
}
5795 * This is our raid5 kernel thread.
5797 * We scan the hash table for stripes which can be handled now.
5798 * During the scan, completed stripes are saved for us by the interrupt
5799 * handler, so that they will not have to wait for our next wakeup.
5801 static void raid5d(struct md_thread
*thread
)
5803 struct mddev
*mddev
= thread
->mddev
;
5804 struct r5conf
*conf
= mddev
->private;
5806 struct blk_plug plug
;
5808 pr_debug("+++ raid5d active\n");
5810 md_check_recovery(mddev
);
5812 if (!bio_list_empty(&conf
->return_bi
) &&
5813 !test_bit(MD_CHANGE_PENDING
, &mddev
->flags
)) {
5814 struct bio_list tmp
= BIO_EMPTY_LIST
;
5815 spin_lock_irq(&conf
->device_lock
);
5816 if (!test_bit(MD_CHANGE_PENDING
, &mddev
->flags
)) {
5817 bio_list_merge(&tmp
, &conf
->return_bi
);
5818 bio_list_init(&conf
->return_bi
);
5820 spin_unlock_irq(&conf
->device_lock
);
5824 blk_start_plug(&plug
);
5826 spin_lock_irq(&conf
->device_lock
);
5829 int batch_size
, released
;
5831 released
= release_stripe_list(conf
, conf
->temp_inactive_list
);
5833 clear_bit(R5_DID_ALLOC
, &conf
->cache_state
);
5836 !list_empty(&conf
->bitmap_list
)) {
5837 /* Now is a good time to flush some bitmap updates */
5839 spin_unlock_irq(&conf
->device_lock
);
5840 bitmap_unplug(mddev
->bitmap
);
5841 spin_lock_irq(&conf
->device_lock
);
5842 conf
->seq_write
= conf
->seq_flush
;
5843 activate_bit_delay(conf
, conf
->temp_inactive_list
);
5845 raid5_activate_delayed(conf
);
5847 while ((bio
= remove_bio_from_retry(conf
))) {
5849 spin_unlock_irq(&conf
->device_lock
);
5850 ok
= retry_aligned_read(conf
, bio
);
5851 spin_lock_irq(&conf
->device_lock
);
5857 batch_size
= handle_active_stripes(conf
, ANY_GROUP
, NULL
,
5858 conf
->temp_inactive_list
);
5859 if (!batch_size
&& !released
)
5861 handled
+= batch_size
;
5863 if (mddev
->flags
& ~(1<<MD_CHANGE_PENDING
)) {
5864 spin_unlock_irq(&conf
->device_lock
);
5865 md_check_recovery(mddev
);
5866 spin_lock_irq(&conf
->device_lock
);
5869 pr_debug("%d stripes handled\n", handled
);
5871 spin_unlock_irq(&conf
->device_lock
);
5872 if (test_and_clear_bit(R5_ALLOC_MORE
, &conf
->cache_state
) &&
5873 mutex_trylock(&conf
->cache_size_mutex
)) {
5874 grow_one_stripe(conf
, __GFP_NOWARN
);
5875 /* Set flag even if allocation failed. This helps
5876 * slow down allocation requests when mem is short
5878 set_bit(R5_DID_ALLOC
, &conf
->cache_state
);
5879 mutex_unlock(&conf
->cache_size_mutex
);
5882 async_tx_issue_pending_all();
5883 blk_finish_plug(&plug
);
5885 pr_debug("--- raid5d inactive\n");
5889 raid5_show_stripe_cache_size(struct mddev
*mddev
, char *page
)
5891 struct r5conf
*conf
;
5893 spin_lock(&mddev
->lock
);
5894 conf
= mddev
->private;
5896 ret
= sprintf(page
, "%d\n", conf
->min_nr_stripes
);
5897 spin_unlock(&mddev
->lock
);
5902 raid5_set_cache_size(struct mddev
*mddev
, int size
)
5904 struct r5conf
*conf
= mddev
->private;
5907 if (size
<= 16 || size
> 32768)
5910 conf
->min_nr_stripes
= size
;
5911 mutex_lock(&conf
->cache_size_mutex
);
5912 while (size
< conf
->max_nr_stripes
&&
5913 drop_one_stripe(conf
))
5915 mutex_unlock(&conf
->cache_size_mutex
);
5918 err
= md_allow_write(mddev
);
5922 mutex_lock(&conf
->cache_size_mutex
);
5923 while (size
> conf
->max_nr_stripes
)
5924 if (!grow_one_stripe(conf
, GFP_KERNEL
))
5926 mutex_unlock(&conf
->cache_size_mutex
);
5930 EXPORT_SYMBOL(raid5_set_cache_size
);
5933 raid5_store_stripe_cache_size(struct mddev
*mddev
, const char *page
, size_t len
)
5935 struct r5conf
*conf
;
5939 if (len
>= PAGE_SIZE
)
5941 if (kstrtoul(page
, 10, &new))
5943 err
= mddev_lock(mddev
);
5946 conf
= mddev
->private;
5950 err
= raid5_set_cache_size(mddev
, new);
5951 mddev_unlock(mddev
);
5956 static struct md_sysfs_entry
5957 raid5_stripecache_size
= __ATTR(stripe_cache_size
, S_IRUGO
| S_IWUSR
,
5958 raid5_show_stripe_cache_size
,
5959 raid5_store_stripe_cache_size
);
5962 raid5_show_rmw_level(struct mddev
*mddev
, char *page
)
5964 struct r5conf
*conf
= mddev
->private;
5966 return sprintf(page
, "%d\n", conf
->rmw_level
);
5972 raid5_store_rmw_level(struct mddev
*mddev
, const char *page
, size_t len
)
5974 struct r5conf
*conf
= mddev
->private;
5980 if (len
>= PAGE_SIZE
)
5983 if (kstrtoul(page
, 10, &new))
5986 if (new != PARITY_DISABLE_RMW
&& !raid6_call
.xor_syndrome
)
5989 if (new != PARITY_DISABLE_RMW
&&
5990 new != PARITY_ENABLE_RMW
&&
5991 new != PARITY_PREFER_RMW
)
5994 conf
->rmw_level
= new;
5998 static struct md_sysfs_entry
5999 raid5_rmw_level
= __ATTR(rmw_level
, S_IRUGO
| S_IWUSR
,
6000 raid5_show_rmw_level
,
6001 raid5_store_rmw_level
);
6005 raid5_show_preread_threshold(struct mddev
*mddev
, char *page
)
6007 struct r5conf
*conf
;
6009 spin_lock(&mddev
->lock
);
6010 conf
= mddev
->private;
6012 ret
= sprintf(page
, "%d\n", conf
->bypass_threshold
);
6013 spin_unlock(&mddev
->lock
);
6018 raid5_store_preread_threshold(struct mddev
*mddev
, const char *page
, size_t len
)
6020 struct r5conf
*conf
;
6024 if (len
>= PAGE_SIZE
)
6026 if (kstrtoul(page
, 10, &new))
6029 err
= mddev_lock(mddev
);
6032 conf
= mddev
->private;
6035 else if (new > conf
->min_nr_stripes
)
6038 conf
->bypass_threshold
= new;
6039 mddev_unlock(mddev
);
6043 static struct md_sysfs_entry
6044 raid5_preread_bypass_threshold
= __ATTR(preread_bypass_threshold
,
6046 raid5_show_preread_threshold
,
6047 raid5_store_preread_threshold
);
6050 raid5_show_skip_copy(struct mddev
*mddev
, char *page
)
6052 struct r5conf
*conf
;
6054 spin_lock(&mddev
->lock
);
6055 conf
= mddev
->private;
6057 ret
= sprintf(page
, "%d\n", conf
->skip_copy
);
6058 spin_unlock(&mddev
->lock
);
6063 raid5_store_skip_copy(struct mddev
*mddev
, const char *page
, size_t len
)
6065 struct r5conf
*conf
;
6069 if (len
>= PAGE_SIZE
)
6071 if (kstrtoul(page
, 10, &new))
6075 err
= mddev_lock(mddev
);
6078 conf
= mddev
->private;
6081 else if (new != conf
->skip_copy
) {
6082 mddev_suspend(mddev
);
6083 conf
->skip_copy
= new;
6085 mddev
->queue
->backing_dev_info
.capabilities
|=
6086 BDI_CAP_STABLE_WRITES
;
6088 mddev
->queue
->backing_dev_info
.capabilities
&=
6089 ~BDI_CAP_STABLE_WRITES
;
6090 mddev_resume(mddev
);
6092 mddev_unlock(mddev
);
6096 static struct md_sysfs_entry
6097 raid5_skip_copy
= __ATTR(skip_copy
, S_IRUGO
| S_IWUSR
,
6098 raid5_show_skip_copy
,
6099 raid5_store_skip_copy
);
6102 stripe_cache_active_show(struct mddev
*mddev
, char *page
)
6104 struct r5conf
*conf
= mddev
->private;
6106 return sprintf(page
, "%d\n", atomic_read(&conf
->active_stripes
));
6111 static struct md_sysfs_entry
6112 raid5_stripecache_active
= __ATTR_RO(stripe_cache_active
);
6115 raid5_show_group_thread_cnt(struct mddev
*mddev
, char *page
)
6117 struct r5conf
*conf
;
6119 spin_lock(&mddev
->lock
);
6120 conf
= mddev
->private;
6122 ret
= sprintf(page
, "%d\n", conf
->worker_cnt_per_group
);
6123 spin_unlock(&mddev
->lock
);
6127 static int alloc_thread_groups(struct r5conf
*conf
, int cnt
,
6129 int *worker_cnt_per_group
,
6130 struct r5worker_group
**worker_groups
);
6132 raid5_store_group_thread_cnt(struct mddev
*mddev
, const char *page
, size_t len
)
6134 struct r5conf
*conf
;
6137 struct r5worker_group
*new_groups
, *old_groups
;
6138 int group_cnt
, worker_cnt_per_group
;
6140 if (len
>= PAGE_SIZE
)
6142 if (kstrtoul(page
, 10, &new))
6145 err
= mddev_lock(mddev
);
6148 conf
= mddev
->private;
6151 else if (new != conf
->worker_cnt_per_group
) {
6152 mddev_suspend(mddev
);
6154 old_groups
= conf
->worker_groups
;
6156 flush_workqueue(raid5_wq
);
6158 err
= alloc_thread_groups(conf
, new,
6159 &group_cnt
, &worker_cnt_per_group
,
6162 spin_lock_irq(&conf
->device_lock
);
6163 conf
->group_cnt
= group_cnt
;
6164 conf
->worker_cnt_per_group
= worker_cnt_per_group
;
6165 conf
->worker_groups
= new_groups
;
6166 spin_unlock_irq(&conf
->device_lock
);
6169 kfree(old_groups
[0].workers
);
6172 mddev_resume(mddev
);
6174 mddev_unlock(mddev
);
6179 static struct md_sysfs_entry
6180 raid5_group_thread_cnt
= __ATTR(group_thread_cnt
, S_IRUGO
| S_IWUSR
,
6181 raid5_show_group_thread_cnt
,
6182 raid5_store_group_thread_cnt
);
6184 static struct attribute
*raid5_attrs
[] = {
6185 &raid5_stripecache_size
.attr
,
6186 &raid5_stripecache_active
.attr
,
6187 &raid5_preread_bypass_threshold
.attr
,
6188 &raid5_group_thread_cnt
.attr
,
6189 &raid5_skip_copy
.attr
,
6190 &raid5_rmw_level
.attr
,
6193 static struct attribute_group raid5_attrs_group
= {
6195 .attrs
= raid5_attrs
,
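/*
 * Worker groups: when group_thread_cnt is non-zero, one r5worker_group is
 * allocated per possible NUMA node (num_possible_nodes()) with cnt workers
 * each, and stripe handling is spread across them via raid5_wq.
 */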
static int alloc_thread_groups(struct r5conf *conf, int cnt,
			       int *group_cnt,
			       int *worker_cnt_per_group,
			       struct r5worker_group **worker_groups)
{
	int i, j, k;
	ssize_t size;
	struct r5worker *workers;

	*worker_cnt_per_group = cnt;
	if (cnt == 0) {
		*group_cnt = 0;
		*worker_groups = NULL;
		return 0;
	}
	*group_cnt = num_possible_nodes();
	size = sizeof(struct r5worker) * cnt;
	workers = kzalloc(size * *group_cnt, GFP_NOIO);
	*worker_groups = kzalloc(sizeof(struct r5worker_group) *
				*group_cnt, GFP_NOIO);
	if (!*worker_groups || !workers) {
		kfree(workers);
		kfree(*worker_groups);
		return -ENOMEM;
	}

	for (i = 0; i < *group_cnt; i++) {
		struct r5worker_group *group;

		group = &(*worker_groups)[i];
		INIT_LIST_HEAD(&group->handle_list);
		group->conf = conf;
		group->workers = workers + i * cnt;

		for (j = 0; j < cnt; j++) {
			struct r5worker *worker = group->workers + j;
			worker->group = group;
			INIT_WORK(&worker->work, raid5_do_work);

			for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
				INIT_LIST_HEAD(worker->temp_inactive_list + k);
		}
	}

	return 0;
}

static void free_thread_groups(struct r5conf *conf)
{
	if (conf->worker_groups)
		kfree(conf->worker_groups[0].workers);
	kfree(conf->worker_groups);
	conf->worker_groups = NULL;
}
static sector_t
raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	struct r5conf *conf = mddev->private;

	if (!sectors)
		sectors = mddev->dev_sectors;
	if (!raid_disks)
		/* size is defined by the smallest of previous and new size */
		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

	sectors &= ~((sector_t)conf->chunk_sectors - 1);
	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
	return sectors * (raid_disks - conf->max_degraded);
}
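/*
 * Worked example (illustrative figures, not taken from this source): a
 * 4-device RAID5 (max_degraded = 1) with chunk_sectors = 1024 (512 KiB)
 * and dev_sectors = 2097153 first rounds dev_sectors down to a chunk
 * multiple (2097152 sectors = 1 GiB per device) and then reports
 * 2097152 * (4 - 1) = 6291456 sectors (3 GiB) of array capacity.
 */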
static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
	safe_put_page(percpu->spare_page);
	if (percpu->scribble)
		flex_array_free(percpu->scribble);
	percpu->spare_page = NULL;
	percpu->scribble = NULL;
}
static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
	if (conf->level == 6 && !percpu->spare_page)
		percpu->spare_page = alloc_page(GFP_KERNEL);
	if (!percpu->scribble)
		percpu->scribble = scribble_alloc(max(conf->raid_disks,
						      conf->previous_raid_disks),
						  max(conf->chunk_sectors,
						      conf->prev_chunk_sectors)
						   / STRIPE_SECTORS,
						  GFP_KERNEL);

	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
		free_scratch_buffer(conf, percpu);
		return -ENOMEM;
	}

	return 0;
}
static void raid5_free_percpu(struct r5conf *conf)
{
	unsigned long cpu;

	if (!conf->percpu)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&conf->cpu_notify);
#endif

	get_online_cpus();
	for_each_possible_cpu(cpu)
		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
	put_online_cpus();

	free_percpu(conf->percpu);
}
static void free_conf(struct r5conf *conf)
{
	if (conf->shrinker.seeks)
		unregister_shrinker(&conf->shrinker);
	free_thread_groups(conf);
	shrink_stripes(conf);
	raid5_free_percpu(conf);
	kfree(conf->disks);
	kfree(conf->stripe_hashtbl);
	kfree(conf);
}
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
	long cpu = (long)hcpu;
	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (alloc_scratch_buffer(conf, percpu)) {
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			return notifier_from_errno(-ENOMEM);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif
static int raid5_alloc_percpu(struct r5conf *conf)
{
	unsigned long cpu;
	int err = 0;

	conf->percpu = alloc_percpu(struct raid5_percpu);
	if (!conf->percpu)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	conf->cpu_notify.notifier_call = raid456_cpu_notify;
	conf->cpu_notify.priority = 0;
	err = register_cpu_notifier(&conf->cpu_notify);
	if (err)
		return err;
#endif

	get_online_cpus();
	for_each_present_cpu(cpu) {
		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
		if (err) {
			pr_err("%s: failed memory allocation for cpu%ld\n",
			       __func__, cpu);
			break;
		}
	}
	put_online_cpus();

	return err;
}
static unsigned long raid5_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
	unsigned long ret = SHRINK_STOP;

	if (mutex_trylock(&conf->cache_size_mutex)) {
		ret = 0;
		while (ret < sc->nr_to_scan &&
		       conf->max_nr_stripes > conf->min_nr_stripes) {
			if (drop_one_stripe(conf) == 0) {
				ret = SHRINK_STOP;
				break;
			}
			ret++;
		}
		mutex_unlock(&conf->cache_size_mutex);
	}
	return ret;
}

static unsigned long raid5_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);

	if (conf->max_nr_stripes < conf->min_nr_stripes)
		/* unlikely, but not impossible */
		return 0;
	return conf->max_nr_stripes - conf->min_nr_stripes;
}
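/*
 * Illustration of the shrinker pair above (numbers are invented for the
 * example): with max_nr_stripes = 1024 and min_nr_stripes = 256,
 * raid5_cache_count() reports 768 reclaimable objects; a subsequent
 * raid5_cache_scan() with sc->nr_to_scan = 128 frees at most 128 stripes
 * via drop_one_stripe(), and never shrinks the cache below min_nr_stripes.
 */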
6420 static struct r5conf
*setup_conf(struct mddev
*mddev
)
6422 struct r5conf
*conf
;
6423 int raid_disk
, memory
, max_disks
;
6424 struct md_rdev
*rdev
;
6425 struct disk_info
*disk
;
6428 int group_cnt
, worker_cnt_per_group
;
6429 struct r5worker_group
*new_group
;
6431 if (mddev
->new_level
!= 5
6432 && mddev
->new_level
!= 4
6433 && mddev
->new_level
!= 6) {
6434 printk(KERN_ERR
"md/raid:%s: raid level not set to 4/5/6 (%d)\n",
6435 mdname(mddev
), mddev
->new_level
);
6436 return ERR_PTR(-EIO
);
6438 if ((mddev
->new_level
== 5
6439 && !algorithm_valid_raid5(mddev
->new_layout
)) ||
6440 (mddev
->new_level
== 6
6441 && !algorithm_valid_raid6(mddev
->new_layout
))) {
6442 printk(KERN_ERR
"md/raid:%s: layout %d not supported\n",
6443 mdname(mddev
), mddev
->new_layout
);
6444 return ERR_PTR(-EIO
);
6446 if (mddev
->new_level
== 6 && mddev
->raid_disks
< 4) {
6447 printk(KERN_ERR
"md/raid:%s: not enough configured devices (%d, minimum 4)\n",
6448 mdname(mddev
), mddev
->raid_disks
);
6449 return ERR_PTR(-EINVAL
);
6452 if (!mddev
->new_chunk_sectors
||
6453 (mddev
->new_chunk_sectors
<< 9) % PAGE_SIZE
||
6454 !is_power_of_2(mddev
->new_chunk_sectors
)) {
6455 printk(KERN_ERR
"md/raid:%s: invalid chunk size %d\n",
6456 mdname(mddev
), mddev
->new_chunk_sectors
<< 9);
6457 return ERR_PTR(-EINVAL
);
6460 conf
= kzalloc(sizeof(struct r5conf
), GFP_KERNEL
);
6463 /* Don't enable multi-threading by default*/
6464 if (!alloc_thread_groups(conf
, 0, &group_cnt
, &worker_cnt_per_group
,
6466 conf
->group_cnt
= group_cnt
;
6467 conf
->worker_cnt_per_group
= worker_cnt_per_group
;
6468 conf
->worker_groups
= new_group
;
6471 spin_lock_init(&conf
->device_lock
);
6472 seqcount_init(&conf
->gen_lock
);
6473 mutex_init(&conf
->cache_size_mutex
);
6474 init_waitqueue_head(&conf
->wait_for_quiescent
);
6475 for (i
= 0; i
< NR_STRIPE_HASH_LOCKS
; i
++) {
6476 init_waitqueue_head(&conf
->wait_for_stripe
[i
]);
6478 init_waitqueue_head(&conf
->wait_for_overlap
);
6479 INIT_LIST_HEAD(&conf
->handle_list
);
6480 INIT_LIST_HEAD(&conf
->hold_list
);
6481 INIT_LIST_HEAD(&conf
->delayed_list
);
6482 INIT_LIST_HEAD(&conf
->bitmap_list
);
6483 bio_list_init(&conf
->return_bi
);
6484 init_llist_head(&conf
->released_stripes
);
6485 atomic_set(&conf
->active_stripes
, 0);
6486 atomic_set(&conf
->preread_active_stripes
, 0);
6487 atomic_set(&conf
->active_aligned_reads
, 0);
6488 conf
->bypass_threshold
= BYPASS_THRESHOLD
;
6489 conf
->recovery_disabled
= mddev
->recovery_disabled
- 1;
6491 conf
->raid_disks
= mddev
->raid_disks
;
6492 if (mddev
->reshape_position
== MaxSector
)
6493 conf
->previous_raid_disks
= mddev
->raid_disks
;
6495 conf
->previous_raid_disks
= mddev
->raid_disks
- mddev
->delta_disks
;
6496 max_disks
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
6498 conf
->disks
= kzalloc(max_disks
* sizeof(struct disk_info
),
6503 conf
->mddev
= mddev
;
6505 if ((conf
->stripe_hashtbl
= kzalloc(PAGE_SIZE
, GFP_KERNEL
)) == NULL
)
6508 /* We init hash_locks[0] separately to that it can be used
6509 * as the reference lock in the spin_lock_nest_lock() call
6510 * in lock_all_device_hash_locks_irq in order to convince
6511 * lockdep that we know what we are doing.
6513 spin_lock_init(conf
->hash_locks
);
6514 for (i
= 1; i
< NR_STRIPE_HASH_LOCKS
; i
++)
6515 spin_lock_init(conf
->hash_locks
+ i
);
6517 for (i
= 0; i
< NR_STRIPE_HASH_LOCKS
; i
++)
6518 INIT_LIST_HEAD(conf
->inactive_list
+ i
);
6520 for (i
= 0; i
< NR_STRIPE_HASH_LOCKS
; i
++)
6521 INIT_LIST_HEAD(conf
->temp_inactive_list
+ i
);
6523 conf
->level
= mddev
->new_level
;
6524 conf
->chunk_sectors
= mddev
->new_chunk_sectors
;
6525 if (raid5_alloc_percpu(conf
) != 0)
6528 pr_debug("raid456: run(%s) called.\n", mdname(mddev
));
6530 rdev_for_each(rdev
, mddev
) {
6531 raid_disk
= rdev
->raid_disk
;
6532 if (raid_disk
>= max_disks
6535 disk
= conf
->disks
+ raid_disk
;
6537 if (test_bit(Replacement
, &rdev
->flags
)) {
6538 if (disk
->replacement
)
6540 disk
->replacement
= rdev
;
6547 if (test_bit(In_sync
, &rdev
->flags
)) {
6548 char b
[BDEVNAME_SIZE
];
6549 printk(KERN_INFO
"md/raid:%s: device %s operational as raid"
6551 mdname(mddev
), bdevname(rdev
->bdev
, b
), raid_disk
);
6552 } else if (rdev
->saved_raid_disk
!= raid_disk
)
6553 /* Cannot rely on bitmap to complete recovery */
6557 conf
->level
= mddev
->new_level
;
6558 if (conf
->level
== 6) {
6559 conf
->max_degraded
= 2;
6560 if (raid6_call
.xor_syndrome
)
6561 conf
->rmw_level
= PARITY_ENABLE_RMW
;
6563 conf
->rmw_level
= PARITY_DISABLE_RMW
;
6565 conf
->max_degraded
= 1;
6566 conf
->rmw_level
= PARITY_ENABLE_RMW
;
6568 conf
->algorithm
= mddev
->new_layout
;
6569 conf
->reshape_progress
= mddev
->reshape_position
;
6570 if (conf
->reshape_progress
!= MaxSector
) {
6571 conf
->prev_chunk_sectors
= mddev
->chunk_sectors
;
6572 conf
->prev_algo
= mddev
->layout
;
6574 conf
->prev_chunk_sectors
= conf
->chunk_sectors
;
6575 conf
->prev_algo
= conf
->algorithm
;
	conf->min_nr_stripes = NR_STRIPES;
	memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
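	/*
	 * Rough worked example (struct sizes are assumptions, not measured
	 * here): with the default min_nr_stripes = 256 (NR_STRIPES),
	 * max_disks = 4 and 4 KiB pages, the dominant term is
	 * 256 * 4 * PAGE_SIZE, i.e. about 4096 KiB of stripe cache plus the
	 * stripe_head and bio structures; the "memory" value just computed
	 * is reported in KiB.
	 */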
6581 atomic_set(&conf
->empty_inactive_list_nr
, NR_STRIPE_HASH_LOCKS
);
6582 if (grow_stripes(conf
, conf
->min_nr_stripes
)) {
6584 "md/raid:%s: couldn't allocate %dkB for buffers\n",
6585 mdname(mddev
), memory
);
6588 printk(KERN_INFO
"md/raid:%s: allocated %dkB\n",
6589 mdname(mddev
), memory
);
6591 * Losing a stripe head costs more than the time to refill it,
6592 * it reduces the queue depth and so can hurt throughput.
6593 * So set it rather large, scaled by number of devices.
6595 conf
->shrinker
.seeks
= DEFAULT_SEEKS
* conf
->raid_disks
* 4;
6596 conf
->shrinker
.scan_objects
= raid5_cache_scan
;
6597 conf
->shrinker
.count_objects
= raid5_cache_count
;
6598 conf
->shrinker
.batch
= 128;
6599 conf
->shrinker
.flags
= 0;
6600 register_shrinker(&conf
->shrinker
);
6602 sprintf(pers_name
, "raid%d", mddev
->new_level
);
6603 conf
->thread
= md_register_thread(raid5d
, mddev
, pers_name
);
6604 if (!conf
->thread
) {
6606 "md/raid:%s: couldn't allocate thread.\n",
6616 return ERR_PTR(-EIO
);
6618 return ERR_PTR(-ENOMEM
);
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
	switch (algo) {
	case ALGORITHM_PARITY_0:
		if (raid_disk < max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_N:
		if (raid_disk >= raid_disks - max_degraded)
			return 1;
		break;
	case ALGORITHM_PARITY_0_6:
		if (raid_disk == 0 ||
		    raid_disk == raid_disks - 1)
			return 1;
		break;
	case ALGORITHM_LEFT_ASYMMETRIC_6:
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
	case ALGORITHM_LEFT_SYMMETRIC_6:
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		if (raid_disk == raid_disks - 1)
			return 1;
	}
	return 0;
}
6647 static int run(struct mddev
*mddev
)
6649 struct r5conf
*conf
;
6650 int working_disks
= 0;
6651 int dirty_parity_disks
= 0;
6652 struct md_rdev
*rdev
;
6653 sector_t reshape_offset
= 0;
6655 long long min_offset_diff
= 0;
6658 if (mddev
->recovery_cp
!= MaxSector
)
6659 printk(KERN_NOTICE
"md/raid:%s: not clean"
6660 " -- starting background reconstruction\n",
6663 rdev_for_each(rdev
, mddev
) {
6665 if (rdev
->raid_disk
< 0)
6667 diff
= (rdev
->new_data_offset
- rdev
->data_offset
);
6669 min_offset_diff
= diff
;
6671 } else if (mddev
->reshape_backwards
&&
6672 diff
< min_offset_diff
)
6673 min_offset_diff
= diff
;
6674 else if (!mddev
->reshape_backwards
&&
6675 diff
> min_offset_diff
)
6676 min_offset_diff
= diff
;
6679 if (mddev
->reshape_position
!= MaxSector
) {
6680 /* Check that we can continue the reshape.
6681 * Difficulties arise if the stripe we would write to
6682 * next is at or after the stripe we would read from next.
6683 * For a reshape that changes the number of devices, this
6684 * is only possible for a very short time, and mdadm makes
6685 * sure that time appears to have past before assembling
6686 * the array. So we fail if that time hasn't passed.
6687 * For a reshape that keeps the number of devices the same
6688 * mdadm must be monitoring the reshape can keeping the
6689 * critical areas read-only and backed up. It will start
6690 * the array in read-only mode, so we check for that.
6692 sector_t here_new
, here_old
;
6694 int max_degraded
= (mddev
->level
== 6 ? 2 : 1);
6698 if (mddev
->new_level
!= mddev
->level
) {
6699 printk(KERN_ERR
"md/raid:%s: unsupported reshape "
6700 "required - aborting.\n",
6704 old_disks
= mddev
->raid_disks
- mddev
->delta_disks
;
6705 /* reshape_position must be on a new-stripe boundary, and one
6706 * further up in new geometry must map after here in old
6708 * If the chunk sizes are different, then as we perform reshape
6709 * in units of the largest of the two, reshape_position needs
6710 * be a multiple of the largest chunk size times new data disks.
6712 here_new
= mddev
->reshape_position
;
6713 chunk_sectors
= max(mddev
->chunk_sectors
, mddev
->new_chunk_sectors
);
6714 new_data_disks
= mddev
->raid_disks
- max_degraded
;
6715 if (sector_div(here_new
, chunk_sectors
* new_data_disks
)) {
6716 printk(KERN_ERR
"md/raid:%s: reshape_position not "
6717 "on a stripe boundary\n", mdname(mddev
));
6720 reshape_offset
= here_new
* chunk_sectors
;
		/* here_new is the stripe we will write to */
		here_old = mddev->reshape_position;
		sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
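		/*
		 * Worked example (figures are illustrative only): growing a
		 * 4-disk RAID5 (3 data disks) to 5 disks (4 data disks) with
		 * chunk_sectors = 1024 and reshape_position = 409600 gives
		 * here_new = 409600 / (1024 * 4) = 100 and
		 * here_old = 409600 / (1024 * 3) = 133, so the next stripe to
		 * be written (100, new layout) still lies well before the
		 * next stripe to be read (133, old layout).
		 */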
6724 /* here_old is the first stripe that we might need to read
6726 if (mddev
->delta_disks
== 0) {
6727 /* We cannot be sure it is safe to start an in-place
6728 * reshape. It is only safe if user-space is monitoring
6729 * and taking constant backups.
6730 * mdadm always starts a situation like this in
6731 * readonly mode so it can take control before
6732 * allowing any writes. So just check for that.
6734 if (abs(min_offset_diff
) >= mddev
->chunk_sectors
&&
6735 abs(min_offset_diff
) >= mddev
->new_chunk_sectors
)
6736 /* not really in-place - so OK */;
6737 else if (mddev
->ro
== 0) {
6738 printk(KERN_ERR
"md/raid:%s: in-place reshape "
6739 "must be started in read-only mode "
6744 } else if (mddev
->reshape_backwards
6745 ? (here_new
* chunk_sectors
+ min_offset_diff
<=
6746 here_old
* chunk_sectors
)
6747 : (here_new
* chunk_sectors
>=
6748 here_old
* chunk_sectors
+ (-min_offset_diff
))) {
6749 /* Reading from the same stripe as writing to - bad */
6750 printk(KERN_ERR
"md/raid:%s: reshape_position too early for "
6751 "auto-recovery - aborting.\n",
6755 printk(KERN_INFO
"md/raid:%s: reshape will continue\n",
6757 /* OK, we should be able to continue; */
6759 BUG_ON(mddev
->level
!= mddev
->new_level
);
6760 BUG_ON(mddev
->layout
!= mddev
->new_layout
);
6761 BUG_ON(mddev
->chunk_sectors
!= mddev
->new_chunk_sectors
);
6762 BUG_ON(mddev
->delta_disks
!= 0);
6765 if (mddev
->private == NULL
)
6766 conf
= setup_conf(mddev
);
6768 conf
= mddev
->private;
6771 return PTR_ERR(conf
);
6773 conf
->min_offset_diff
= min_offset_diff
;
6774 mddev
->thread
= conf
->thread
;
6775 conf
->thread
= NULL
;
6776 mddev
->private = conf
;
6778 for (i
= 0; i
< conf
->raid_disks
&& conf
->previous_raid_disks
;
6780 rdev
= conf
->disks
[i
].rdev
;
6781 if (!rdev
&& conf
->disks
[i
].replacement
) {
6782 /* The replacement is all we have yet */
6783 rdev
= conf
->disks
[i
].replacement
;
6784 conf
->disks
[i
].replacement
= NULL
;
6785 clear_bit(Replacement
, &rdev
->flags
);
6786 conf
->disks
[i
].rdev
= rdev
;
6790 if (conf
->disks
[i
].replacement
&&
6791 conf
->reshape_progress
!= MaxSector
) {
6792 /* replacements and reshape simply do not mix. */
6793 printk(KERN_ERR
"md: cannot handle concurrent "
6794 "replacement and reshape.\n");
6797 if (test_bit(In_sync
, &rdev
->flags
)) {
6801 /* This disc is not fully in-sync. However if it
6802 * just stored parity (beyond the recovery_offset),
6803 * when we don't need to be concerned about the
6804 * array being dirty.
6805 * When reshape goes 'backwards', we never have
6806 * partially completed devices, so we only need
6807 * to worry about reshape going forwards.
6809 /* Hack because v0.91 doesn't store recovery_offset properly. */
6810 if (mddev
->major_version
== 0 &&
6811 mddev
->minor_version
> 90)
6812 rdev
->recovery_offset
= reshape_offset
;
6814 if (rdev
->recovery_offset
< reshape_offset
) {
6815 /* We need to check old and new layout */
6816 if (!only_parity(rdev
->raid_disk
,
6819 conf
->max_degraded
))
6822 if (!only_parity(rdev
->raid_disk
,
6824 conf
->previous_raid_disks
,
6825 conf
->max_degraded
))
6827 dirty_parity_disks
++;
6831 * 0 for a fully functional array, 1 or 2 for a degraded array.
6833 mddev
->degraded
= calc_degraded(conf
);
6835 if (has_failed(conf
)) {
6836 printk(KERN_ERR
"md/raid:%s: not enough operational devices"
6837 " (%d/%d failed)\n",
6838 mdname(mddev
), mddev
->degraded
, conf
->raid_disks
);
6842 /* device size must be a multiple of chunk size */
6843 mddev
->dev_sectors
&= ~(mddev
->chunk_sectors
- 1);
6844 mddev
->resync_max_sectors
= mddev
->dev_sectors
;
6846 if (mddev
->degraded
> dirty_parity_disks
&&
6847 mddev
->recovery_cp
!= MaxSector
) {
6848 if (mddev
->ok_start_degraded
)
6850 "md/raid:%s: starting dirty degraded array"
6851 " - data corruption possible.\n",
6855 "md/raid:%s: cannot start dirty degraded array.\n",
6861 if (mddev
->degraded
== 0)
6862 printk(KERN_INFO
"md/raid:%s: raid level %d active with %d out of %d"
6863 " devices, algorithm %d\n", mdname(mddev
), conf
->level
,
6864 mddev
->raid_disks
-mddev
->degraded
, mddev
->raid_disks
,
6867 printk(KERN_ALERT
"md/raid:%s: raid level %d active with %d"
6868 " out of %d devices, algorithm %d\n",
6869 mdname(mddev
), conf
->level
,
6870 mddev
->raid_disks
- mddev
->degraded
,
6871 mddev
->raid_disks
, mddev
->new_layout
);
6873 print_raid5_conf(conf
);
6875 if (conf
->reshape_progress
!= MaxSector
) {
6876 conf
->reshape_safe
= conf
->reshape_progress
;
6877 atomic_set(&conf
->reshape_stripes
, 0);
6878 clear_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
);
6879 clear_bit(MD_RECOVERY_CHECK
, &mddev
->recovery
);
6880 set_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
);
6881 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
6882 mddev
->sync_thread
= md_register_thread(md_do_sync
, mddev
,
6886 /* Ok, everything is just fine now */
6887 if (mddev
->to_remove
== &raid5_attrs_group
)
6888 mddev
->to_remove
= NULL
;
6889 else if (mddev
->kobj
.sd
&&
6890 sysfs_create_group(&mddev
->kobj
, &raid5_attrs_group
))
6892 "raid5: failed to create sysfs attributes for %s\n",
6894 md_set_array_sectors(mddev
, raid5_size(mddev
, 0, 0));
		bool discard_supported = true;
		/* read-ahead size must cover two whole stripes, which
		 * is 2 * (datadisks) * chunksize, where 'datadisks' is the
		 * number of data devices in the array.
		 */
		int data_disks = conf->previous_raid_disks - conf->max_degraded;
		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
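		/*
		 * Example of the sizing above (illustrative numbers): with 4
		 * data disks, a 512 KiB chunk and 4 KiB pages,
		 * stripe = 4 * 128 = 512 pages, so read-ahead is raised to at
		 * least 1024 pages (4 MiB), i.e. two full stripes.
		 */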
6909 chunk_size
= mddev
->chunk_sectors
<< 9;
6910 blk_queue_io_min(mddev
->queue
, chunk_size
);
6911 blk_queue_io_opt(mddev
->queue
, chunk_size
*
6912 (conf
->raid_disks
- conf
->max_degraded
));
6913 mddev
->queue
->limits
.raid_partial_stripes_expensive
= 1;
		/*
		 * We can only discard a whole stripe. It doesn't make sense
		 * to discard the data disks but still write the parity disk.
		 */
		stripe = stripe * PAGE_SIZE;
		/* Round up to power of 2, as discard handling
		 * currently assumes that */
		while ((stripe-1) & stripe)
			stripe = (stripe | (stripe-1)) + 1;
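		/*
		 * Example (illustrative): 3 data disks with a 512 KiB chunk
		 * give a full stripe of 1.5 MiB (1572864 bytes); the loop
		 * above rounds that up to the next power of two, 2 MiB, which
		 * then becomes both the discard alignment and granularity.
		 */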
6923 mddev
->queue
->limits
.discard_alignment
= stripe
;
6924 mddev
->queue
->limits
.discard_granularity
= stripe
;
6926 * unaligned part of discard request will be ignored, so can't
6927 * guarantee discard_zeroes_data
6929 mddev
->queue
->limits
.discard_zeroes_data
= 0;
6931 blk_queue_max_write_same_sectors(mddev
->queue
, 0);
6933 rdev_for_each(rdev
, mddev
) {
6934 disk_stack_limits(mddev
->gendisk
, rdev
->bdev
,
6935 rdev
->data_offset
<< 9);
6936 disk_stack_limits(mddev
->gendisk
, rdev
->bdev
,
6937 rdev
->new_data_offset
<< 9);
6939 * discard_zeroes_data is required, otherwise data
6940 * could be lost. Consider a scenario: discard a stripe
6941 * (the stripe could be inconsistent if
6942 * discard_zeroes_data is 0); write one disk of the
6943 * stripe (the stripe could be inconsistent again
6944 * depending on which disks are used to calculate
6945 * parity); the disk is broken; The stripe data of this
6948 if (!blk_queue_discard(bdev_get_queue(rdev
->bdev
)) ||
6949 !bdev_get_queue(rdev
->bdev
)->
6950 limits
.discard_zeroes_data
)
6951 discard_supported
= false;
6952 /* Unfortunately, discard_zeroes_data is not currently
6953 * a guarantee - just a hint. So we only allow DISCARD
6954 * if the sysadmin has confirmed that only safe devices
6955 * are in use by setting a module parameter.
6957 if (!devices_handle_discard_safely
) {
6958 if (discard_supported
) {
6959 pr_info("md/raid456: discard support disabled due to uncertainty.\n");
6960 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
6962 discard_supported
= false;
6966 if (discard_supported
&&
6967 mddev
->queue
->limits
.max_discard_sectors
>= stripe
&&
6968 mddev
->queue
->limits
.discard_granularity
>= stripe
)
6969 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
,
6972 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD
,
6978 md_unregister_thread(&mddev
->thread
);
6979 print_raid5_conf(conf
);
6981 mddev
->private = NULL
;
6982 printk(KERN_ALERT
"md/raid:%s: failed to run raid set.\n", mdname(mddev
));
static void raid5_free(struct mddev *mddev, void *priv)
{
	struct r5conf *conf = priv;

	free_conf(conf);
	mddev->to_remove = &raid5_attrs_group;
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	int i;

	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
		conf->chunk_sectors / 2, mddev->layout);
	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			       conf->disks[i].rdev &&
			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
}
static void print_raid5_conf (struct r5conf *conf)
{
	int i;
	struct disk_info *tmp;

	printk(KERN_DEBUG "RAID conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
	       conf->raid_disks,
	       conf->raid_disks - conf->mddev->degraded);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->disks + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
7033 static int raid5_spare_active(struct mddev
*mddev
)
7036 struct r5conf
*conf
= mddev
->private;
7037 struct disk_info
*tmp
;
7039 unsigned long flags
;
7041 for (i
= 0; i
< conf
->raid_disks
; i
++) {
7042 tmp
= conf
->disks
+ i
;
7043 if (tmp
->replacement
7044 && tmp
->replacement
->recovery_offset
== MaxSector
7045 && !test_bit(Faulty
, &tmp
->replacement
->flags
)
7046 && !test_and_set_bit(In_sync
, &tmp
->replacement
->flags
)) {
7047 /* Replacement has just become active. */
7049 || !test_and_clear_bit(In_sync
, &tmp
->rdev
->flags
))
7052 /* Replaced device not technically faulty,
7053 * but we need to be sure it gets removed
7054 * and never re-added.
7056 set_bit(Faulty
, &tmp
->rdev
->flags
);
7057 sysfs_notify_dirent_safe(
7058 tmp
->rdev
->sysfs_state
);
7060 sysfs_notify_dirent_safe(tmp
->replacement
->sysfs_state
);
7061 } else if (tmp
->rdev
7062 && tmp
->rdev
->recovery_offset
== MaxSector
7063 && !test_bit(Faulty
, &tmp
->rdev
->flags
)
7064 && !test_and_set_bit(In_sync
, &tmp
->rdev
->flags
)) {
7066 sysfs_notify_dirent_safe(tmp
->rdev
->sysfs_state
);
7069 spin_lock_irqsave(&conf
->device_lock
, flags
);
7070 mddev
->degraded
= calc_degraded(conf
);
7071 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
7072 print_raid5_conf(conf
);
7076 static int raid5_remove_disk(struct mddev
*mddev
, struct md_rdev
*rdev
)
7078 struct r5conf
*conf
= mddev
->private;
7080 int number
= rdev
->raid_disk
;
7081 struct md_rdev
**rdevp
;
7082 struct disk_info
*p
= conf
->disks
+ number
;
7084 print_raid5_conf(conf
);
7085 if (rdev
== p
->rdev
)
7087 else if (rdev
== p
->replacement
)
7088 rdevp
= &p
->replacement
;
7092 if (number
>= conf
->raid_disks
&&
7093 conf
->reshape_progress
== MaxSector
)
7094 clear_bit(In_sync
, &rdev
->flags
);
7096 if (test_bit(In_sync
, &rdev
->flags
) ||
7097 atomic_read(&rdev
->nr_pending
)) {
7101 /* Only remove non-faulty devices if recovery
7104 if (!test_bit(Faulty
, &rdev
->flags
) &&
7105 mddev
->recovery_disabled
!= conf
->recovery_disabled
&&
7106 !has_failed(conf
) &&
7107 (!p
->replacement
|| p
->replacement
== rdev
) &&
7108 number
< conf
->raid_disks
) {
7114 if (atomic_read(&rdev
->nr_pending
)) {
7115 /* lost the race, try later */
7118 } else if (p
->replacement
) {
7119 /* We must have just cleared 'rdev' */
7120 p
->rdev
= p
->replacement
;
7121 clear_bit(Replacement
, &p
->replacement
->flags
);
7122 smp_mb(); /* Make sure other CPUs may see both as identical
7123 * but will never see neither - if they are careful
7125 p
->replacement
= NULL
;
7126 clear_bit(WantReplacement
, &rdev
->flags
);
7128 /* We might have just removed the Replacement as faulty-
7129 * clear the bit just in case
7131 clear_bit(WantReplacement
, &rdev
->flags
);
7134 print_raid5_conf(conf
);
7138 static int raid5_add_disk(struct mddev
*mddev
, struct md_rdev
*rdev
)
7140 struct r5conf
*conf
= mddev
->private;
7143 struct disk_info
*p
;
7145 int last
= conf
->raid_disks
- 1;
7147 if (mddev
->recovery_disabled
== conf
->recovery_disabled
)
7150 if (rdev
->saved_raid_disk
< 0 && has_failed(conf
))
7151 /* no point adding a device */
7154 if (rdev
->raid_disk
>= 0)
7155 first
= last
= rdev
->raid_disk
;
7158 * find the disk ... but prefer rdev->saved_raid_disk
7161 if (rdev
->saved_raid_disk
>= 0 &&
7162 rdev
->saved_raid_disk
>= first
&&
7163 conf
->disks
[rdev
->saved_raid_disk
].rdev
== NULL
)
7164 first
= rdev
->saved_raid_disk
;
7166 for (disk
= first
; disk
<= last
; disk
++) {
7167 p
= conf
->disks
+ disk
;
7168 if (p
->rdev
== NULL
) {
7169 clear_bit(In_sync
, &rdev
->flags
);
7170 rdev
->raid_disk
= disk
;
7172 if (rdev
->saved_raid_disk
!= disk
)
7174 rcu_assign_pointer(p
->rdev
, rdev
);
7178 for (disk
= first
; disk
<= last
; disk
++) {
7179 p
= conf
->disks
+ disk
;
7180 if (test_bit(WantReplacement
, &p
->rdev
->flags
) &&
7181 p
->replacement
== NULL
) {
7182 clear_bit(In_sync
, &rdev
->flags
);
7183 set_bit(Replacement
, &rdev
->flags
);
7184 rdev
->raid_disk
= disk
;
7187 rcu_assign_pointer(p
->replacement
, rdev
);
7192 print_raid5_conf(conf
);
static int raid5_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize;
	struct r5conf *conf = mddev->private;

	sectors &= ~((sector_t)conf->chunk_sectors - 1);
	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
static int check_stripe_cache(struct mddev *mddev)
{
	/* Can only proceed if there are plenty of stripe_heads.
	 * We need a minimum of one full stripe, and for sensible progress
	 * it is best to have about 4 times that.
	 * If we require 4 times, then the default 256 4K stripe_heads will
	 * allow for chunk sizes up to 256K, which is probably OK.
	 * If the chunk size is greater, user-space should request more
	 * stripe_heads first.
	 */
	struct r5conf *conf = mddev->private;
	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->min_nr_stripes ||
	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
	    > conf->min_nr_stripes) {
		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
		       mdname(mddev),
		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
			/ STRIPE_SIZE) * 4);
		return 0;
	}
	return 1;
}
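/*
 * Example of the check above (numbers illustrative): with 4 KiB STRIPE_SIZE
 * and the default min_nr_stripes = 256, a 256 KiB chunk needs
 * (262144 / 4096) * 4 = 256 stripe_heads and just passes, while a 1 MiB
 * chunk would need 1024 and is rejected until the administrator grows
 * stripe_cache_size.
 */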
7255 static int check_reshape(struct mddev
*mddev
)
7257 struct r5conf
*conf
= mddev
->private;
7259 if (mddev
->delta_disks
== 0 &&
7260 mddev
->new_layout
== mddev
->layout
&&
7261 mddev
->new_chunk_sectors
== mddev
->chunk_sectors
)
7262 return 0; /* nothing to do */
7263 if (has_failed(conf
))
7265 if (mddev
->delta_disks
< 0 && mddev
->reshape_position
== MaxSector
) {
7266 /* We might be able to shrink, but the devices must
7267 * be made bigger first.
7268 * For raid6, 4 is the minimum size.
7269 * Otherwise 2 is the minimum
7272 if (mddev
->level
== 6)
7274 if (mddev
->raid_disks
+ mddev
->delta_disks
< min
)
7278 if (!check_stripe_cache(mddev
))
7281 if (mddev
->new_chunk_sectors
> mddev
->chunk_sectors
||
7282 mddev
->delta_disks
> 0)
7283 if (resize_chunks(conf
,
7284 conf
->previous_raid_disks
7285 + max(0, mddev
->delta_disks
),
7286 max(mddev
->new_chunk_sectors
,
7287 mddev
->chunk_sectors
)
7290 return resize_stripes(conf
, (conf
->previous_raid_disks
7291 + mddev
->delta_disks
));
7294 static int raid5_start_reshape(struct mddev
*mddev
)
7296 struct r5conf
*conf
= mddev
->private;
7297 struct md_rdev
*rdev
;
7299 unsigned long flags
;
7301 if (test_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
))
7304 if (!check_stripe_cache(mddev
))
7307 if (has_failed(conf
))
7310 rdev_for_each(rdev
, mddev
) {
7311 if (!test_bit(In_sync
, &rdev
->flags
)
7312 && !test_bit(Faulty
, &rdev
->flags
))
7316 if (spares
- mddev
->degraded
< mddev
->delta_disks
- conf
->max_degraded
)
7317 /* Not enough devices even to make a degraded array
7322 /* Refuse to reduce size of the array. Any reductions in
7323 * array size must be through explicit setting of array_size
7326 if (raid5_size(mddev
, 0, conf
->raid_disks
+ mddev
->delta_disks
)
7327 < mddev
->array_sectors
) {
7328 printk(KERN_ERR
"md/raid:%s: array size must be reduced "
7329 "before number of disks\n", mdname(mddev
));
7333 atomic_set(&conf
->reshape_stripes
, 0);
7334 spin_lock_irq(&conf
->device_lock
);
7335 write_seqcount_begin(&conf
->gen_lock
);
7336 conf
->previous_raid_disks
= conf
->raid_disks
;
7337 conf
->raid_disks
+= mddev
->delta_disks
;
7338 conf
->prev_chunk_sectors
= conf
->chunk_sectors
;
7339 conf
->chunk_sectors
= mddev
->new_chunk_sectors
;
7340 conf
->prev_algo
= conf
->algorithm
;
7341 conf
->algorithm
= mddev
->new_layout
;
7343 /* Code that selects data_offset needs to see the generation update
7344 * if reshape_progress has been set - so a memory barrier needed.
7347 if (mddev
->reshape_backwards
)
7348 conf
->reshape_progress
= raid5_size(mddev
, 0, 0);
7350 conf
->reshape_progress
= 0;
7351 conf
->reshape_safe
= conf
->reshape_progress
;
7352 write_seqcount_end(&conf
->gen_lock
);
7353 spin_unlock_irq(&conf
->device_lock
);
7355 /* Now make sure any requests that proceeded on the assumption
7356 * the reshape wasn't running - like Discard or Read - have
7359 mddev_suspend(mddev
);
7360 mddev_resume(mddev
);
7362 /* Add some new drives, as many as will fit.
7363 * We know there are enough to make the newly sized array work.
7364 * Don't add devices if we are reducing the number of
7365 * devices in the array. This is because it is not possible
7366 * to correctly record the "partially reconstructed" state of
7367 * such devices during the reshape and confusion could result.
7369 if (mddev
->delta_disks
>= 0) {
7370 rdev_for_each(rdev
, mddev
)
7371 if (rdev
->raid_disk
< 0 &&
7372 !test_bit(Faulty
, &rdev
->flags
)) {
7373 if (raid5_add_disk(mddev
, rdev
) == 0) {
7375 >= conf
->previous_raid_disks
)
7376 set_bit(In_sync
, &rdev
->flags
);
7378 rdev
->recovery_offset
= 0;
7380 if (sysfs_link_rdev(mddev
, rdev
))
7381 /* Failure here is OK */;
7383 } else if (rdev
->raid_disk
>= conf
->previous_raid_disks
7384 && !test_bit(Faulty
, &rdev
->flags
)) {
7385 /* This is a spare that was manually added */
7386 set_bit(In_sync
, &rdev
->flags
);
7389 /* When a reshape changes the number of devices,
7390 * ->degraded is measured against the larger of the
7391 * pre and post number of devices.
7393 spin_lock_irqsave(&conf
->device_lock
, flags
);
7394 mddev
->degraded
= calc_degraded(conf
);
7395 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
7397 mddev
->raid_disks
= conf
->raid_disks
;
7398 mddev
->reshape_position
= conf
->reshape_progress
;
7399 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
7401 clear_bit(MD_RECOVERY_SYNC
, &mddev
->recovery
);
7402 clear_bit(MD_RECOVERY_CHECK
, &mddev
->recovery
);
7403 clear_bit(MD_RECOVERY_DONE
, &mddev
->recovery
);
7404 set_bit(MD_RECOVERY_RESHAPE
, &mddev
->recovery
);
7405 set_bit(MD_RECOVERY_RUNNING
, &mddev
->recovery
);
7406 mddev
->sync_thread
= md_register_thread(md_do_sync
, mddev
,
7408 if (!mddev
->sync_thread
) {
7409 mddev
->recovery
= 0;
7410 spin_lock_irq(&conf
->device_lock
);
7411 write_seqcount_begin(&conf
->gen_lock
);
7412 mddev
->raid_disks
= conf
->raid_disks
= conf
->previous_raid_disks
;
7413 mddev
->new_chunk_sectors
=
7414 conf
->chunk_sectors
= conf
->prev_chunk_sectors
;
7415 mddev
->new_layout
= conf
->algorithm
= conf
->prev_algo
;
7416 rdev_for_each(rdev
, mddev
)
7417 rdev
->new_data_offset
= rdev
->data_offset
;
7419 conf
->generation
--;
7420 conf
->reshape_progress
= MaxSector
;
7421 mddev
->reshape_position
= MaxSector
;
7422 write_seqcount_end(&conf
->gen_lock
);
7423 spin_unlock_irq(&conf
->device_lock
);
7426 conf
->reshape_checkpoint
= jiffies
;
7427 md_wakeup_thread(mddev
->sync_thread
);
7428 md_new_event(mddev
);
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(struct r5conf *conf)
{

	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
		struct md_rdev *rdev;

		spin_lock_irq(&conf->device_lock);
		conf->previous_raid_disks = conf->raid_disks;
		rdev_for_each(rdev, conf->mddev)
			rdev->data_offset = rdev->new_data_offset;
		smp_wmb();
		conf->reshape_progress = MaxSector;
		conf->mddev->reshape_position = MaxSector;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_for_overlap);

		/* read-ahead size must cover two whole stripes, which is
		 * 2 * (datadisks) * chunksize, where 'datadisks' is the
		 * number of data devices in the array.
		 */
		if (conf->mddev->queue) {
			int data_disks = conf->raid_disks - conf->max_degraded;
			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		}
	}
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks ;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}
static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		lock_all_device_hash_locks_irq(conf);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_cmd(conf->wait_for_quiescent,
				    atomic_read(&conf->active_stripes) == 0 &&
				    atomic_read(&conf->active_aligned_reads) == 0,
				    unlock_all_device_hash_locks_irq(conf),
				    lock_all_device_hash_locks_irq(conf));
		conf->quiesce = 1;
		unlock_all_device_hash_locks_irq(conf);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		lock_all_device_hash_locks_irq(conf);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_quiescent);
		wake_up(&conf->wait_for_overlap);
		unlock_all_device_hash_locks_irq(conf);
		break;
	}
}
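/*
 * Usage note summarising the switch above (hedged reading, not an addition
 * to the interface): state 1 drains the array, waiting under the hash locks
 * until active_stripes and active_aligned_reads both reach zero; state 0
 * re-enables writes and wakes the waiters; state 2 is the light-weight
 * resume used around suspend/resume during reshape.
 */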
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
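/*
 * Illustration (made-up geometry, for orientation only): a 3-disk
 * single-zone RAID0 with 512 KiB chunks becomes a 4-disk RAID4/5 in
 * ALGORITHM_PARITY_N layout; dev_sectors is set to one zone's share per
 * disk, raid_disks grows by the single parity device (delta_disks = 1),
 * and recovery_cp = MaxSector keeps the new array from being treated as
 * dirty.
 */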
static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}
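/*
 * Example of the chunk selection above (illustrative sizes): the default
 * candidate is 64 KiB (chunksect = 128 sectors); for a RAID1 of
 * 2000904 sectors the loop halves it until it divides array_sectors
 * evenly, stopping at 8 sectors (4 KiB), and anything smaller than
 * STRIPE_SIZE would abort the takeover with -EINVAL.
 */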
static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}
7623 static int raid5_check_reshape(struct mddev
*mddev
)
7625 /* For a 2-drive array, the layout and chunk size can be changed
7626 * immediately as not restriping is needed.
7627 * For larger arrays we record the new value - after validation
7628 * to be used by a reshape pass.
7630 struct r5conf
*conf
= mddev
->private;
7631 int new_chunk
= mddev
->new_chunk_sectors
;
7633 if (mddev
->new_layout
>= 0 && !algorithm_valid_raid5(mddev
->new_layout
))
7635 if (new_chunk
> 0) {
7636 if (!is_power_of_2(new_chunk
))
7638 if (new_chunk
< (PAGE_SIZE
>>9))
7640 if (mddev
->array_sectors
& (new_chunk
-1))
7641 /* not factor of array size */
7645 /* They look valid */
7647 if (mddev
->raid_disks
== 2) {
7648 /* can make the change immediately */
7649 if (mddev
->new_layout
>= 0) {
7650 conf
->algorithm
= mddev
->new_layout
;
7651 mddev
->layout
= mddev
->new_layout
;
7653 if (new_chunk
> 0) {
7654 conf
->chunk_sectors
= new_chunk
;
7655 mddev
->chunk_sectors
= new_chunk
;
7657 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
7658 md_wakeup_thread(mddev
->thread
);
7660 return check_reshape(mddev
);
7663 static int raid6_check_reshape(struct mddev
*mddev
)
7665 int new_chunk
= mddev
->new_chunk_sectors
;
7667 if (mddev
->new_layout
>= 0 && !algorithm_valid_raid6(mddev
->new_layout
))
7669 if (new_chunk
> 0) {
7670 if (!is_power_of_2(new_chunk
))
7672 if (new_chunk
< (PAGE_SIZE
>> 9))
7674 if (mddev
->array_sectors
& (new_chunk
-1))
7675 /* not factor of array size */
7679 /* They look valid */
7680 return check_reshape(mddev
);
static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}
7723 static struct md_personality raid5_personality
;
7725 static void *raid6_takeover(struct mddev
*mddev
)
7727 /* Currently can only take over a raid5. We map the
7728 * personality to an equivalent raid6 personality
7729 * with the Q block at the end.
7733 if (mddev
->pers
!= &raid5_personality
)
7734 return ERR_PTR(-EINVAL
);
7735 if (mddev
->degraded
> 1)
7736 return ERR_PTR(-EINVAL
);
7737 if (mddev
->raid_disks
> 253)
7738 return ERR_PTR(-EINVAL
);
7739 if (mddev
->raid_disks
< 3)
7740 return ERR_PTR(-EINVAL
);
7742 switch (mddev
->layout
) {
7743 case ALGORITHM_LEFT_ASYMMETRIC
:
7744 new_layout
= ALGORITHM_LEFT_ASYMMETRIC_6
;
7746 case ALGORITHM_RIGHT_ASYMMETRIC
:
7747 new_layout
= ALGORITHM_RIGHT_ASYMMETRIC_6
;
7749 case ALGORITHM_LEFT_SYMMETRIC
:
7750 new_layout
= ALGORITHM_LEFT_SYMMETRIC_6
;
7752 case ALGORITHM_RIGHT_SYMMETRIC
:
7753 new_layout
= ALGORITHM_RIGHT_SYMMETRIC_6
;
7755 case ALGORITHM_PARITY_0
:
7756 new_layout
= ALGORITHM_PARITY_0_6
;
7758 case ALGORITHM_PARITY_N
:
7759 new_layout
= ALGORITHM_PARITY_N
;
7762 return ERR_PTR(-EINVAL
);
7764 mddev
->new_level
= 6;
7765 mddev
->new_layout
= new_layout
;
7766 mddev
->delta_disks
= 1;
7767 mddev
->raid_disks
+= 1;
7768 return setup_conf(mddev
);
7771 static struct md_personality raid6_personality
=
7775 .owner
= THIS_MODULE
,
7776 .make_request
= make_request
,
7780 .error_handler
= error
,
7781 .hot_add_disk
= raid5_add_disk
,
7782 .hot_remove_disk
= raid5_remove_disk
,
7783 .spare_active
= raid5_spare_active
,
7784 .sync_request
= sync_request
,
7785 .resize
= raid5_resize
,
7787 .check_reshape
= raid6_check_reshape
,
7788 .start_reshape
= raid5_start_reshape
,
7789 .finish_reshape
= raid5_finish_reshape
,
7790 .quiesce
= raid5_quiesce
,
7791 .takeover
= raid6_takeover
,
7792 .congested
= raid5_congested
,
7794 static struct md_personality raid5_personality
=
7798 .owner
= THIS_MODULE
,
7799 .make_request
= make_request
,
7803 .error_handler
= error
,
7804 .hot_add_disk
= raid5_add_disk
,
7805 .hot_remove_disk
= raid5_remove_disk
,
7806 .spare_active
= raid5_spare_active
,
7807 .sync_request
= sync_request
,
7808 .resize
= raid5_resize
,
7810 .check_reshape
= raid5_check_reshape
,
7811 .start_reshape
= raid5_start_reshape
,
7812 .finish_reshape
= raid5_finish_reshape
,
7813 .quiesce
= raid5_quiesce
,
7814 .takeover
= raid5_takeover
,
7815 .congested
= raid5_congested
,
7818 static struct md_personality raid4_personality
=
7822 .owner
= THIS_MODULE
,
7823 .make_request
= make_request
,
7827 .error_handler
= error
,
7828 .hot_add_disk
= raid5_add_disk
,
7829 .hot_remove_disk
= raid5_remove_disk
,
7830 .spare_active
= raid5_spare_active
,
7831 .sync_request
= sync_request
,
7832 .resize
= raid5_resize
,
7834 .check_reshape
= raid5_check_reshape
,
7835 .start_reshape
= raid5_start_reshape
,
7836 .finish_reshape
= raid5_finish_reshape
,
7837 .quiesce
= raid5_quiesce
,
7838 .takeover
= raid4_takeover
,
7839 .congested
= raid5_congested
,
static int __init raid5_init(void)
{
	raid5_wq = alloc_workqueue("raid5wq",
		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
	if (!raid5_wq)
		return -ENOMEM;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
	destroy_workqueue(raid5_wq);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");