/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "multipath.h"

#define MAX_WORK_PER_DISK 128

#define NR_RESERVED_BUFS 32
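
/*
 * Pick the first operational path: scan the table for an In_sync rdev
 * and take a reference on it.  Returns the path index, or -1 if no
 * usable path remains.
 */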
static int multipath_map (struct mpconf *conf)
{
	int i, disks = conf->raid_disks;

	/*
	 * Later we do read balancing on the read side
	 * now we use the first available disk.
	 */

	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && test_bit(In_sync, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
	return (-1);
}
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	struct mddev *mddev = mp_bh->mddev;
	struct mpconf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}

/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
	struct bio *bio = mp_bh->master_bio;
	struct mpconf *conf = mp_bh->mddev->private;

	bio_endio(bio, err);
	mempool_free(mp_bh, conf->pool);
}
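
/*
 * Per-path bio completion: on success complete the master bio; on a
 * real error (not readahead), fail the path and reschedule the request.
 */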
static void multipath_end_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_rw & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error(mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev, b),
		       (unsigned long long)bio->bi_iter.bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
}
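
/*
 * Main request entry point: pick a path with multipath_map(), clone the
 * master bio into mp_bh->bio, redirect it at the chosen device and submit.
 */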
static void multipath_make_request(struct mddev *mddev, struct bio * bio)
{
	struct mpconf *conf = mddev->private;
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, -EIO);
		mempool_free(mp_bh, conf->pool);
		return;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
}
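
/*
 * Report path state for /proc/mdstat: e.g. " [2/1] [U_]" for a
 * two-path array with one working path.
 */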
static void multipath_status (struct seq_file *seq, struct mddev *mddev)
{
	struct mpconf *conf = mddev->private;
	int i;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			    conf->multipaths[i].rdev &&
			    test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
}
static int multipath_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	struct mpconf *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	char b[BDEVNAME_SIZE];

	if (conf->raid_disks - mddev->degraded <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		printk(KERN_ALERT
		       "multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
		return;
	}
	/*
	 * Mark disk as unusable
	 */
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "multipath: IO failure on %s,"
	       " disabling IO path.\n"
	       "multipath: Operation continuing"
	       " on %d IO paths.\n",
	       bdevname(rdev->bdev, b),
	       conf->raid_disks - mddev->degraded);
}
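
/*
 * Debug helper: dump the working/total path counts and the state of
 * every configured path to the kernel log.
 */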
static void print_multipath_conf (struct mpconf *conf)
{
	int i;
	struct multipath_info *tmp;

	printk("MULTIPATH conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->multipaths + i;
		if (tmp->rdev)
			printk(" disk%d, o:%d, dev:%s\n",
			       i, !test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev, b));
	}
}
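
/*
 * Hot-add: place the new rdev in the first free slot (or the slot it
 * asks for), stack the queue limits and mark the path In_sync.
 */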
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	struct request_queue *q;
	int err = -EEXIST;
	int path;
	struct multipath_info *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	print_multipath_conf(conf);

	for (path = first; path <= last; path++)
		if ((p = conf->multipaths + path)->rdev == NULL) {
			q = rdev->bdev->bd_disk->queue;
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_segments to one, lying
			 * within a single page.
			 * (Note: it is very unlikely that a device with
			 * merge_bvec_fn will be involved in multipath.)
			 */
			if (q->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			spin_lock_irq(&conf->device_lock);
			mddev->degraded--;
			rdev->raid_disk = path;
			set_bit(In_sync, &rdev->flags);
			spin_unlock_irq(&conf->device_lock);
			rcu_assign_pointer(p->rdev, rdev);
			err = 0;
			md_integrity_add_rdev(rdev, mddev);
			break;
		}

	print_multipath_conf(conf);

	return err;
}
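
/*
 * Hot-remove: only allowed once the path is failed and idle; clear the
 * slot, then use synchronize_rcu() to make sure no reader still holds
 * the old rdev pointer before letting it go.
 */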
static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
			       " but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_multipath_conf(conf);
	return err;
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working multipaths.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */
static void multipathd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	struct mpconf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;

		if ((mp_bh->path = multipath_map (conf)) < 0) {
			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
			       " error for block %llu\n",
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			multipath_end_bh_io(mp_bh, -EIO);
		} else {
			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
			       " to another IO path\n",
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			*bio = *(mp_bh->master_bio);
			bio->bi_iter.bi_sector +=
				conf->multipaths[mp_bh->path].rdev->data_offset;
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
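
/*
 * A multipath array is not striped: its usable size is simply that of
 * one member device, and reshape is not supported.
 */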
static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	return mddev->dev_sectors;
}
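
/*
 * Set up the personality: allocate the private mpconf, register every
 * verified member device as a path, create the retry mempool and start
 * the multipathd thread.
 */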
static int multipath_run (struct mddev *mddev)
{
	struct mpconf *conf;
	int disk_idx;
	struct multipath_info *disk;
	struct md_rdev *rdev;
	int working_disks;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->level != LEVEL_MULTIPATH) {
		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	/*
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_stop()]
	 */

	conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out;
	}

	conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
				   GFP_KERNEL);
	if (!conf->multipaths) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	working_disks = 0;
	rdev_for_each(rdev, mddev) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, not that we ever expect a device with
		 * a merge_bvec_fn to be involved in multipath */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		if (!test_bit(Faulty, &rdev->flags))
			working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!working_disks) {
		printk(KERN_ERR "multipath: no operational IO paths for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - working_disks;

	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
						 sizeof(struct multipath_bh));
	if (conf->pool == NULL) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->thread = md_register_thread(multipathd, mddev,
					   "multipath");
	if (!mddev->thread) {
		printk(KERN_ERR "multipath: couldn't allocate thread"
		       " for %s\n", mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO
	       "multipath: array %s active with %d out of %d IO paths\n",
	       mdname(mddev), conf->raid_disks - mddev->degraded,
	       mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}
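
/*
 * Tear down everything multipath_run() set up: stop the thread, drain
 * the queue and free the mempool and path table.
 */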
static int multipath_stop (struct mddev *mddev)
{
	struct mpconf *conf = mddev->private;

	md_unregister_thread(&mddev->thread);
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static struct md_personality multipath_personality =
{
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.stop		= multipath_stop,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk = multipath_remove_disk,
	.size		= multipath_size,
};

static int __init multipath_init (void)
{
	return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
	unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");