/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fsnet.co.uk>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"
static int raid0_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	for (i = 0; i < raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}
/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k ? "/" : "",
			       bdevname(conf->devlist[j * raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				 "device-offset=%10lluKB, size=%10lluKB\n",
		       (unsigned long long)zone_start >> 1,
		       (unsigned long long)conf->strip_zone[j].dev_start >> 1,
		       (unsigned long long)zone_size >> 1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	bool discard_supported = false;

	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
				   conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev *) *
				conf->nr_strip_zones * mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
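	/* Illustrative note (added for clarity, not in the original source):
	 * with two 100GiB disks and one 200GiB disk, zone 0 stripes over all
	 * three devices for the first 100GiB of each (300GiB of array space),
	 * and zone 1 covers only the remaining 100GiB of the large disk.
	 */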
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);

		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
			conf->has_merge_bvec = 1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;

		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
			discard_supported = true;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	if (!discard_supported)
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = NULL;
	return err;
}
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}
/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
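	/* Worked example (illustrative numbers, not from the original
	 * source): one zone of 3 devices, chunk_sects = 128 (64KiB), and
	 * array sector 1000.  sect_in_chunk = 1000 & 127 = 104, the global
	 * chunk is 1000 >> 7 = 7, so the target device is 7 % 3 = 1, the
	 * chunk within that device is 1000 / (3 * 128) = 2, and the device
	 * offset is 2 * 128 + 104 = 360 sectors.
	 */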
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone) * raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct strip_zone *zone;
	struct md_rdev *rdev;
	struct request_queue *subq;

	if (is_power_of_2(chunk_sectors))
		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
					+ bio_sectors)) << 9;
	else
		max = (chunk_sectors - (sector_div(sector, chunk_sectors)
					+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
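	/* Illustrative check (numbers are an assumption, not from the
	 * original comment): with chunk_sectors = 128, a bio starting at
	 * in-chunk offset 104 and already holding 16 sectors can accept at
	 * most (128 - (104 + 16)) << 9 = 4096 more bytes before it would
	 * straddle a chunk boundary.
	 */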
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	if (max < biovec->bv_len)
		/* too small already, no need to check further */
		return max;
	if (!conf->has_merge_bvec)
		return max;

	/* May need to check subordinate device */
	sector = sector_offset;
	zone = find_zone(mddev->private, &sector_offset);
	rdev = map_sector(mddev, zone, sector, &sector_offset);
	subq = bdev_get_queue(rdev->bdev);
	if (subq->merge_bvec_fn) {
		bvm->bi_bdev = rdev->bdev;
		bvm->bi_sector = sector_offset + zone->dev_start +
			rdev->data_offset;
		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	} else
		return max;
}
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += rdev->sectors;

	return array_sectors;
}
static int raid0_stop(struct mddev *mddev);
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
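	/* Worked example (illustrative numbers, not from the original
	 * comment): 4 disks with 512KiB chunks and 4KiB pages give
	 * stripe = 4 * 524288 / 4096 = 512 pages, so ra_pages is raised
	 * to at least 1024 pages, i.e. 4MiB of read-ahead.
	 */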
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		raid0_stop(mddev);

	return ret;
}
static int raid0_stop(struct mddev *mddev)
{
	struct r0conf *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
/*
 * Is the io distributed over 1 or more chunks ?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >= ((bio->bi_sector & (chunk_sects - 1))
					+ (bio->bi_size >> 9));
	} else {
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
					+ (bio->bi_size >> 9));
	}
}
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
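		/* Illustrative note (numbers are an assumption, not from the
		 * original comment): with chunk_sects = 128 and a bio
		 * starting at sector 1000, 1000 & 127 = 104, so the first
		 * half of the split carries 128 - 104 = 24 sectors and ends
		 * exactly on the chunk boundary.
		 */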
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects - 1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		raid0_make_request(mddev, &bp->bio1);
		raid0_make_request(mddev, &bp->bio2);
		bio_pair_release(bp);
		return;
	}

	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;

	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
		/* Just ignore it */
		bio_endio(bio, 0);
		return;
	}

	generic_make_request(bio);
	return;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return;
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks - 1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
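	/* Note (added for clarity, not in the original source): the raid10
	 * layout word keeps near_copies in its low byte and far_copies in
	 * the next byte, so (1 << 8) + 2 == 0x102 means far=1/near=2, the
	 * common "n2" layout checked below.
	 */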
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks >> 1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;
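	/* Worked example (illustrative numbers, not from the original
	 * comment): for array_sectors = 976, 976 & 127 = 80, so the loop
	 * halves chunksect from 128 until 976 & 15 == 0, leaving a
	 * 16-sector (8KiB) chunk, which still satisfies the PAGE_SIZE
	 * check below.
	 */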
	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
	       mddev->level);

	return ERR_PTR(-EINVAL);
}
static void raid0_quiesce(struct mddev *mddev, int state)
{
}
static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};
static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}
static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");