/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/freezer.h>

#include <linux/init.h>

#include <linux/file.h>

#include <linux/kmod.h>

#include <asm/unaligned.h>
#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
static struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still holding
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
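/*
 * Typical use, as in md_print_devices() further down (sketch only):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		...	use mddev here; a reference is held for us
 *	}
 */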
static int md_fail_request (struct request_queue *q, struct bio *bio)
{
	bio_io_error(bio);
	return 0;
}
static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		spin_unlock(&all_mddevs_lock);
		blk_cleanup_queue(mddev->queue);
		kobject_put(&mddev->kobj);
	} else
		spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}
	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}
static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}
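/*
 * Worked example, assuming the usual MD_RESERVED_BLOCKS of 64 1K-blocks:
 * a 10000 KB device gives size = 10000, and MD_NEW_SIZE_BLOCKS rounds
 * down to a 64K boundary and then steps back one reservation,
 *	(10000 & ~63) - 64 = 9984 - 64 = 9920,
 * so the 0.90 superblock sits 64K-aligned within the last 128K.
 */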
static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -EINVAL;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}
static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))
		return 1;

	return 0;
}
static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *      -EINVAL superblock incompatible or invalid
 *      -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */
struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
		if (sb->level != 1 && sb->level != 4
		    && sb->level != 5 && sb->level != 6
		    && sb->level != 10) {
			/* FIXME use a better test */
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			goto abort;
		}
	}

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_dev_size(rdev, sb->chunk_size);

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, tmp, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			/* the slot was never filled in, so the device
			 * must be missing
			 */
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
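/*
 * Example: with max_dev == 1 the checksummed area is 256 + 2 = 258 bytes;
 * the loop above sums 64 32-bit words (256 bytes) and leaves size == 2,
 * so the trailing 16-bit dev_role is folded in by the final half-word add.
 */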
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
		if (sb->level != cpu_to_le32(1) &&
		    sb->level != cpu_to_le32(4) &&
		    sb->level != cpu_to_le32(5) &&
		    sb->level != cpu_to_le32(6) &&
		    sb->level != cpu_to_le32(10)) {
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			return -EINVAL;
		}
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_offset + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le64_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags) &&
	    rdev->recovery_offset > 0) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	rdev_for_each(rdev2, tmp, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, tmp, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev, *rdev2;

	rdev_for_each(rdev, tmp, mddev1)
		rdev_for_each(rdev2, tmp2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains)
				return 1;

	return 0;
}
static LIST_HEAD(pending_raid_disks);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->size = rdev->size;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->dev.kobj;
	else
		ko = &rdev->bdev->bd_disk->dev.kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	list_add(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");

	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state"
	 */
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}
static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}
static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);

}
static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, tmp2, mddev)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		rdev_for_each(rdev, tmp2, mddev)
			print_rdev(rdev);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept with case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};
static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len += sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags)) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flag
	 *  -blocked - clears the Blocked flag
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		clear_bit(Blocked, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	}
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		mdk_rdev_t *rdev2;
		struct list_head *tmp;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		rdev_for_each(rdev2, tmp, rdev->mddev)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		}
		sprintf(nm, "rd%d", rdev->raid_disk);
		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
			printk(KERN_WARNING
			       "md: cannot register "
			       "%s for %s\n",
			       nm, mdname(rdev->mddev));

		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->size && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
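/*
 * e.g. overlaps(0, 100, 50, 100) == 1 (the second range starts inside
 * the first), while overlaps(0, 100, 100, 50) == 0 (they merely abut).
 */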
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	unsigned long long oldsize = rdev->size;
	mddev_t *my_mddev = rdev->mddev;

	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	rdev->size = size;
	if (size > oldsize && rdev->mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->size, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp, *tmp2;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			rdev_for_each(rdev2, tmp2, mddev)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->size,
					      rdev2->data_offset, rdev2->size))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsize back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->size = oldsize;
			return -EBUSY;
		}
	}
	if (size < my_mddev->size || my_mddev->size == 0)
		my_mddev->size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
			       super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk( KERN_ERR \
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}


	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));

}
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];
	char *e;
	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, len);
	buf[len] = 0;
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	msec = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	ssize_t rv = len;
	if (mddev->pers)
		return -EBUSY;
	if (len == 0)
		return 0;
	if (len >= sizeof(mddev->clevel))
		return -ENOSPC;
	strncpy(mddev->clevel, buf, len);
	if (mddev->clevel[len-1] == '\n')
		len--;
	mddev->clevel[len] = 0;
	mddev->level = LEVEL_NONE;
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	if (mddev->reshape_position != MaxSector)
		mddev->new_layout = n;
	else
		mddev->layout = n;
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_size != mddev->new_chunk)
		return sprintf(page, "%d (%d)\n", mddev->new_chunk,
			       mddev->chunk_size);
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set chunk_size if array is not yet active */
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	else if (mddev->reshape_position != MaxSector)
		mddev->new_chunk = n;
	else
		mddev->chunk_size = n;
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->size == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(mddev_t * mddev, int ro);
static int do_md_run(mddev_t * mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->active) > 1)
			return -EBUSY;
		err = do_md_stop(mddev, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->active) > 1)
				return -EBUSY;
			err = do_md_stop(mddev, 2);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro != 1)
				err = do_md_stop(mddev, 1);
			else
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					if (mddev->persistent)
						set_bit(MD_CHANGE_CLEAN,
							&mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else {
			mddev->ro = 0;
			mddev->recovery_cp = MaxSector;
			err = do_md_run(mddev);
		}
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			if (mddev->external)
				clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else
		return len;
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;


	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
static ssize_t
size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
}

static int update_size(mddev_t *mddev, unsigned long size);

static ssize_t
size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	char *e;
	int err = 0;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	if (!*buf || *buf == '\n' ||
	    (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		err = update_size(mddev, size);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->size == 0 ||
		    mddev->size > size)
			mddev->size = size;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(mddev_t *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(mddev_t *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t
action_show(mddev_t *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(mddev_t *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			mddev->recovery = 0;
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return len;
}

static ssize_t
mismatch_cnt_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long) mddev->resync_mismatches);
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);


static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
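/*
 * Usage note (illustrative): a scrub is requested through sync_action
 * and its outcome read back from mismatch_cnt, e.g.
 *
 *	echo check > /sys/block/md0/md/sync_action
 *	cat /sys/block/md0/md/mismatch_cnt
 *
 * "check" only counts inconsistencies while "repair" also rewrites
 * them; both set MD_RECOVERY_REQUESTED so the personality can tell a
 * user-requested pass from an ordinary resync.
 */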
static ssize_t
sync_min_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(mddev_t *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(mddev_t *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
static ssize_t
sync_force_parallel_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(mddev_t *mddev, char *page)
{
	unsigned long resync, dt, db;
	resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = resync - (mddev->resync_mark_cnt);
	return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
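/*
 * Worked example for the arithmetic above: the mark counters are in
 * 512-byte sectors, so if 40960 sectors were synced over the last 10
 * seconds, db/dt/2 = 40960/10/2 = 2048 K/sec.
 */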
static ssize_t
sync_completed_show(mddev_t *mddev, char *page)
{
	unsigned long max_blocks, resync;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_blocks = mddev->resync_max_sectors;
	else
		max_blocks = mddev->size << 1;

	resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
	return sprintf(page, "%lu / %lu\n", resync, max_blocks);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
min_sync_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_size) {
		if (min & (sector_t)((mddev->chunk_size>>9)-1))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
static ssize_t
max_sync_show(mddev_t *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(mddev_t *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_size) {
			if (max & (sector_t)((mddev->chunk_size>>9)-1))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
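/*
 * Usage note (illustrative): sync_min and sync_max bound the region,
 * in sectors, that the next resync/check will cover, e.g.
 *
 *	echo 0      > /sys/block/md0/md/sync_min
 *	echo 204800 > /sys/block/md0/md/sync_max
 *	echo max    > /sys/block/md0/md/sync_max
 *
 * Both must be chunk-aligned on chunked arrays, and sync_max can only
 * be reduced while no resync is running.
 */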
static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if (new >= mddev->suspend_hi ||
	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
		mddev->suspend_lo = new;
		mddev->pers->quiesce(mddev, 2);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);

	if (mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
		mddev->suspend_hi = new;
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
		return len;
	} else
		return -EINVAL;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
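/*
 * suspend_lo and suspend_hi together describe a sector window in which
 * writes are held off.  Raising suspend_hi quiesces the personality
 * (quiesce(mddev, 1) then quiesce(mddev, 0)) so in-flight requests
 * drain; advancing suspend_lo uses the lighter quiesce(mddev, 2) to
 * release requests that now fall below the window.  Personalities
 * without a quiesce method reject both stores with -EINVAL.
 */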
static ssize_t
reshape_position_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk = mddev->chunk_size;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void md_free(struct kobject *ko)
{
	mddev_t *mddev = container_of(ko, mddev_t, kobj);
	kfree(mddev);
}

static struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};
static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	static DEFINE_MUTEX(disks_mutex);
	mddev_t *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned = (MAJOR(dev) != MD_MAJOR);
	int shift = partitioned ? MdpMinorShift : 0;
	int unit = MINOR(dev) >> shift;
	int error;

	if (!mddev)
		return NULL;

	mutex_lock(&disks_mutex);
	if (mddev->gendisk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk = alloc_disk(1 << shift);
	if (!disk) {
		mutex_unlock(&disks_mutex);
		mddev_put(mddev);
		return NULL;
	}
	disk->major = MAJOR(dev);
	disk->first_minor = unit << shift;
	if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	add_disk(disk);
	mddev->gendisk = disk;
	error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
				     "%s", "md");
	mutex_unlock(&disks_mutex);
	if (error)
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
	else
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
	return NULL;
}
static void md_safemode_timeout(unsigned long data)
{
	mddev_t *mddev = (mddev_t *) data;

	mddev->safemode = 1;
	md_wakeup_thread(mddev->thread);
}
static int start_dirty_degraded;

static int do_md_run(mddev_t * mddev)
{
	int err;
	int chunk_size;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	struct gendisk *disk;
	struct mdk_personality *pers;
	char b[BDEVNAME_SIZE];

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	chunk_size = mddev->chunk_size;

	if (chunk_size) {
		if (chunk_size > MAX_CHUNK_SIZE) {
			printk(KERN_ERR "too big chunk_size: %d > %d\n",
				chunk_size, MAX_CHUNK_SIZE);
			return -EINVAL;
		}
		/*
		 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
		 */
		if ( (1 << ffz(~chunk_size)) != chunk_size) {
			printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
			return -EINVAL;
		}
		if (chunk_size < PAGE_SIZE) {
			printk(KERN_ERR "too small chunk_size: %d < %ld\n",
				chunk_size, PAGE_SIZE);
			return -EINVAL;
		}

		/* devices must have minimum size of one chunk */
		rdev_for_each(rdev, tmp, mddev) {
			if (test_bit(Faulty, &rdev->flags))
				continue;
			if (rdev->size < chunk_size / 1024) {
				printk(KERN_WARNING
					"md: Dev %s smaller than chunk_size:"
					" %lluk < %dk\n",
					bdevname(rdev->bdev,b),
					(unsigned long long)rdev->size,
					chunk_size / 1024);
				return -EINVAL;
			}
		}
	}

#ifdef CONFIG_KMOD
	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);
#endif

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, tmp, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->data_offset < rdev->sb_offset) {
			if (mddev->size &&
			    rdev->data_offset + mddev->size*2
			    > rdev->sb_offset*2) {
				printk("md: %s: data overlaps metadata\n",
					mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_offset*2 + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
					mdname(mddev));
				return -EINVAL;
			}
		}
	}

	md_probe(mddev->unit, NULL, NULL);
	disk = mddev->gendisk;
	if (!disk)
		return -ENOMEM;

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	mddev->level = pers->level;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		mdk_rdev_t *rdev2;
		struct list_head *tmp2;
		int warned = 0;
		rdev_for_each(rdev, tmp, mddev) {
			rdev_for_each(rdev2, tmp2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}
		}
		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
	mddev->barriers_work = 1;
	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (!err && mddev->pers->sync_request) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		printk(KERN_ERR "md: pers->run() failed ...\n");
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;

	rdev_for_each(rdev, tmp, mddev)
		if (rdev->raid_disk >= 0) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
				printk("md: cannot register %s for %s\n",
				       nm, mdname(mddev));
		}

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags)
		md_update_sb(mddev, 0);

	set_capacity(disk, mddev->array_size<<1);

	/* If we call blk_queue_make_request here, it will
	 * re-initialise max_sectors etc which may have been
	 * refined inside -> run.  So just set the bits we need to set.
	 * Most initialisation happened when we called
	 * blk_queue_make_request(..., md_fail_request)
	 * earlier.
	 */
	mddev->queue->queuedata = mddev;
	mddev->queue->make_request_fn = mddev->pers->make_request;

	/* If there is a partially-recovered drive we need to
	 * start recovery here.  If we leave it to md_check_recovery,
	 * it will remove the drives and not do the right thing
	 */
	if (mddev->degraded && !mddev->sync_thread) {
		struct list_head *rtmp;
		int spares = 0;
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags))
				/* complete an interrupted recovery */
				spares++;
		if (spares && mddev->pers->sync_request) {
			mddev->recovery = 0;
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			}
		}
	}
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	mddev->changed = 1;
	md_new_event(mddev);
	kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
	return 0;
}
static int restart_array(mddev_t *mddev)
{
	struct gendisk *disk = mddev->gendisk;
	int err;

	/*
	 * Complain if it has no devices
	 */
	err = -ENXIO;
	if (list_empty(&mddev->disks))
		goto out;

	if (mddev->pers) {
		err = -EBUSY;
		if (!mddev->ro)
			goto out;

		mddev->safemode = 0;
		mddev->ro = 0;
		set_disk_ro(disk, 0);

		printk(KERN_INFO "md: %s switched to read-write mode.\n",
			mdname(mddev));
		/*
		 * Kick recovery or resync if necessary
		 */
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		err = 0;
	} else
		err = -EINVAL;

out:
	return err;
}
/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

static void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}
/* mode:
 *   0 - completely stop and dis-assemble array
 *   1 - switch to readonly
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(mddev_t * mddev, int mode)
{
	int err = 0;
	struct gendisk *disk = mddev->gendisk;

	if (mddev->pers) {
		if (atomic_read(&mddev->active)>2) {
			printk("md: %s still in use.\n",mdname(mddev));
			return -EBUSY;
		}

		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
		}

		del_timer_sync(&mddev->safemode_timer);

		invalidate_partition(disk, 0);

		switch(mode) {
		case 1: /* readonly */
			err = -ENXIO;
			if (mddev->ro==1)
				goto out;
			mddev->ro = 1;
			break;
		case 0: /* disassemble */
		case 2: /* stop */
			bitmap_flush(mddev);
			md_super_wait(mddev);
			if (mddev->ro)
				set_disk_ro(disk, 0);
			blk_queue_make_request(mddev->queue, md_fail_request);
			mddev->pers->stop(mddev);
			mddev->queue->merge_bvec_fn = NULL;
			mddev->queue->unplug_fn = NULL;
			mddev->queue->backing_dev_info.congested_fn = NULL;
			if (mddev->pers->sync_request)
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);

			module_put(mddev->pers->owner);
			mddev->pers = NULL;
			/* tell userspace to handle 'inactive' */
			sysfs_notify(&mddev->kobj, NULL, "array_state");

			set_capacity(disk, 0);
			mddev->changed = 1;

			if (mddev->ro)
				mddev->ro = 0;
		}
		if (!mddev->in_sync || mddev->flags) {
			/* mark array as shutdown cleanly */
			mddev->in_sync = 1;
			md_update_sb(mddev, 1);
		}
		if (mode == 1)
			set_disk_ro(disk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	}

	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		mdk_rdev_t *rdev;
		struct list_head *tmp;

		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
		}
		mddev->bitmap_offset = 0;

		rdev_for_each(rdev, tmp, mddev)
			if (rdev->raid_disk >= 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
			}

		/* make sure all md_delayed_delete calls have finished */
		flush_scheduled_work();

		export_array(mddev);

		mddev->array_size = 0;
		mddev->size = 0;
		mddev->raid_disks = 0;
		mddev->recovery_cp = 0;
		mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
		mddev->reshape_position = MaxSector;
		mddev->external = 0;
		mddev->persistent = 0;
		mddev->level = LEVEL_NONE;
		mddev->clevel[0] = 0;
		mddev->flags = 0;
		mddev->ro = 0;
		mddev->metadata_type[0] = 0;
		mddev->chunk_size = 0;
		mddev->ctime = mddev->utime = 0;
		mddev->layout = 0;
		mddev->max_disks = 0;
		mddev->events = 0;
		mddev->delta_disks = 0;
		mddev->new_level = LEVEL_NONE;
		mddev->new_layout = 0;
		mddev->new_chunk = 0;
		mddev->curr_resync = 0;
		mddev->resync_mismatches = 0;
		mddev->suspend_lo = mddev->suspend_hi = 0;
		mddev->sync_speed_min = mddev->sync_speed_max = 0;
		mddev->recovery = 0;
		mddev->in_sync = 0;
		mddev->changed = 0;
		mddev->degraded = 0;
		mddev->barriers_work = 0;
		mddev->safemode = 0;

	} else if (mddev->pers)
		printk(KERN_INFO "md: %s switched to read-only mode.\n",
			mdname(mddev));
	err = 0;
	md_new_event(mddev);
out:
	return err;
}
#ifndef MODULE
static void autorun_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run (mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop (mddev, 0);
	}
}
/*
 * lets try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev0, *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   mdk_rdev_t, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, candidates)
			export_rdev(rdev);
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */
static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}
* mddev
, void __user
* arg
)
4024 mdu_array_info_t info
;
4025 int nr
,working
,active
,failed
,spare
;
4027 struct list_head
*tmp
;
4029 nr
=working
=active
=failed
=spare
=0;
4030 rdev_for_each(rdev
, tmp
, mddev
) {
4032 if (test_bit(Faulty
, &rdev
->flags
))
4036 if (test_bit(In_sync
, &rdev
->flags
))
4043 info
.major_version
= mddev
->major_version
;
4044 info
.minor_version
= mddev
->minor_version
;
4045 info
.patch_version
= MD_PATCHLEVEL_VERSION
;
4046 info
.ctime
= mddev
->ctime
;
4047 info
.level
= mddev
->level
;
4048 info
.size
= mddev
->size
;
4049 if (info
.size
!= mddev
->size
) /* overflow */
4052 info
.raid_disks
= mddev
->raid_disks
;
4053 info
.md_minor
= mddev
->md_minor
;
4054 info
.not_persistent
= !mddev
->persistent
;
4056 info
.utime
= mddev
->utime
;
4059 info
.state
= (1<<MD_SB_CLEAN
);
4060 if (mddev
->bitmap
&& mddev
->bitmap_offset
)
4061 info
.state
= (1<<MD_SB_BITMAP_PRESENT
);
4062 info
.active_disks
= active
;
4063 info
.working_disks
= working
;
4064 info
.failed_disks
= failed
;
4065 info
.spare_disks
= spare
;
4067 info
.layout
= mddev
->layout
;
4068 info
.chunk_size
= mddev
->chunk_size
;
4070 if (copy_to_user(arg
, &info
, sizeof(info
)))
static int get_bitmap_file(mddev_t * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	md_allow_write(mddev);

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}
static int get_disk_info(mddev_t * mddev, void __user * arg)
{
	mdu_disk_info_t info;
	unsigned int nr;
	mdk_rdev_t *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	nr = info.number;

	rdev = find_rdev_nr(mddev, nr);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
							mdk_rdev_t, same_set);
			int err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
				mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set save_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
			    info->raid_disk < mddev->raid_disks)
				rdev->raid_disk = info->raid_disk;
			else
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		rdev->saved_raid_disk = rdev->raid_disk;

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);

		md_update_sb(mddev, 1);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device (dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
		} else
			rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
		rdev->size = calc_dev_size(rdev, mddev->chunk_size);

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}
static int hot_remove_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}
static int hot_add_disk(mddev_t * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	unsigned int size;
	mdk_rdev_t *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device (dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
	else
		rdev->sb_offset =
			rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;

	size = calc_dev_size(rdev, mddev->chunk_size);
	rdev->size = size;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */

	if (rdev->desc_nr == mddev->max_disks) {
		printk(KERN_WARNING "%s: can not hot-add to full array!\n",
			mdname(mddev));
		err = -EBUSY;
		goto abort_unbind_export;
	}

	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_unbind_export:
	unbind_rdev_from_array(rdev);

abort_export:
	export_rdev(rdev);
	return err;
}
static int set_bitmap_file(mddev_t *mddev, int fd)
{
	int err;

	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}


	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_file = fget(fd);

		if (mddev->bitmap_file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		err = deny_bitmap_write_access(mddev->bitmap_file);
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			fput(mddev->bitmap_file);
			mddev->bitmap_file = NULL;
			return err;
		}
		mddev->bitmap_offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0)
			err = bitmap_create(mddev);
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_file) {
			restore_bitmap_write_access(mddev->bitmap_file);
			fput(mddev->bitmap_file);
		}
		mddev->bitmap_file = NULL;
	}

	return err;
}
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent,layout,chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->size          = info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external	     = 0;

	mddev->layout        = info->layout;
	mddev->chunk_size    = info->chunk_size;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags         = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk = mddev->chunk_size;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;

	return 0;
}
static int update_size(mddev_t *mddev, unsigned long size)
{
	mdk_rdev_t * rdev;
	int rv;
	struct list_head *tmp;
	int fit = (size == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "size" is the amount of each device that is used.
	 * This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available
	 * We can only consider changing the size if no resync
	 * or reconstruction is happening, and if the new size
	 * is acceptable. It must fit before the sb_offset or,
	 * if that is <data_offset, it must fit before the
	 * size of each device.
	 * If size is zero, we find the largest size that fits.
	 */
	if (mddev->sync_thread)
		return -EBUSY;
	rdev_for_each(rdev, tmp, mddev) {
		sector_t avail;
		avail = rdev->size * 2;

		if (fit && (size == 0 || size > avail/2))
			size = avail/2;
		if (avail < ((sector_t)size << 1))
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, (sector_t)size *2);
	if (!rv) {
		struct block_device *bdev;

		bdev = bdget_disk(mddev->gendisk, 0);
		if (bdev) {
			mutex_lock(&bdev->bd_inode->i_mutex);
			i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
			mutex_unlock(&bdev->bd_inode->i_mutex);
			bdput(bdev);
		}
	}
	return rv;
}
static int update_raid_disks(mddev_t *mddev, int raid_disks)
{
	int rv;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (raid_disks <= 0 ||
	    raid_disks >= mddev->max_disks)
		return -EINVAL;
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
		return -EBUSY;
	mddev->delta_disks = raid_disks - mddev->raid_disks;

	rv = mddev->pers->check_reshape(mddev);
	return rv;
}
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state,ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/* mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
	    mddev->chunk_size    != info->chunk_size    ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->size != info->size) cnt++;
	if (mddev->raid_disks != info->raid_disks) cnt++;
	if (mddev->layout != info->layout) cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
	if (cnt == 0) return 0;
	if (cnt > 1) return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->reconfig == NULL)
			return -EINVAL;
		else
			return mddev->pers->reconfig(mddev, info->layout, -1);
	}
	if (info->size >= 0 && mddev->size != info->size)
		rv = update_size(mddev, info->size);

	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->default_bitmap_offset == 0)
				return -EINVAL;
			mddev->bitmap_offset = mddev->default_bitmap_offset;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
static int set_disk_faulty(mddev_t *mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	if (mddev->pers == NULL)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENODEV;

	md_error(mddev, rdev);
	return 0;
}
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	mddev_t *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = get_capacity(mddev->gendisk) / 8;
	return 0;
}
static int md_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	mddev_t *mddev = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd)
	{
		case RAID_VERSION:
			err = get_version(argp);
			goto done;

		case PRINT_RAID_DEBUG:
			err = 0;
			md_print_devices();
			goto done;

#ifndef MODULE
		case RAID_AUTORUN:
			err = 0;
			autostart_arrays(arg);
			goto done;
#endif
		default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = inode->i_bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto abort;
	}

	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	switch (cmd)
	{
		case SET_ARRAY_INFO:
			{
				mdu_array_info_t info;
				if (!arg)
					memset(&info, 0, sizeof(info));
				else if (copy_from_user(&info, argp, sizeof(info))) {
					err = -EFAULT;
					goto abort_unlock;
				}
				if (mddev->pers) {
					err = update_array_info(mddev, &info);
					if (err) {
						printk(KERN_WARNING "md: couldn't update"
						       " array info. %d\n", err);
						goto abort_unlock;
					}
					goto done_unlock;
				}
				if (!list_empty(&mddev->disks)) {
					printk(KERN_WARNING
					       "md: array %s already has disks!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				if (mddev->raid_disks) {
					printk(KERN_WARNING
					       "md: array %s already initialised!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				err = set_array_info(mddev, &info);
				if (err) {
					printk(KERN_WARNING "md: couldn't set"
					       " array info. %d\n", err);
					goto abort_unlock;
				}
			}
			goto done_unlock;

		default:;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd)
	{
		case GET_ARRAY_INFO:
			err = get_array_info(mddev, argp);
			goto done_unlock;

		case GET_BITMAP_FILE:
			err = get_bitmap_file(mddev, argp);
			goto done_unlock;

		case GET_DISK_INFO:
			err = get_disk_info(mddev, argp);
			goto done_unlock;

		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;

		case STOP_ARRAY:
			err = do_md_stop (mddev, 0);
			goto done_unlock;

		case STOP_ARRAY_RO:
			err = do_md_stop (mddev, 1);
			goto done_unlock;

	/*
	 * We have a problem here : there is no easy way to give a CHS
	 * virtual geometry. We currently pretend that we have a 2 heads
	 * 4 sectors (with a BIG number of cylinders...). This drives
	 * dosfs just mad... ;-)
	 */
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
	 */
	if (_IOC_TYPE(cmd) == MD_MAJOR &&
	    mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);

		} else {
			err = -EROFS;
			goto abort_unlock;
		}
	}

	switch (cmd)
	{
		case ADD_NEW_DISK:
		{
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}

		case HOT_REMOVE_DISK:
			err = hot_remove_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case HOT_ADD_DISK:
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case SET_DISK_FAULTY:
			err = set_disk_faulty(mddev, new_decode_dev(arg));
			goto done_unlock;

		case RUN_ARRAY:
			err = do_md_run (mddev);
			goto done_unlock;

		case SET_BITMAP_FILE:
			err = set_bitmap_file(mddev, (int)arg);
			goto done_unlock;

		default:
			err = -EINVAL;
			goto abort_unlock;
	}

done_unlock:
abort_unlock:
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
static int md_open(struct inode *inode, struct file *file)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
	int err;

	if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
		goto out;

	err = 0;
	mddev_get(mddev);
	mddev_unlock(mddev);

	check_disk_change(inode->i_bdev);
 out:
	return err;
}

static int md_release(struct inode *inode, struct file * file)
{
	mddev_t *mddev = inode->i_bdev->bd_disk->private_data;

	BUG_ON(!mddev);
	mddev_put(mddev);

	return 0;
}

static int md_media_changed(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	mddev_t *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
	.getgeo		= md_getgeo,
	.media_changed	= md_media_changed,
	.revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
{
	mdk_thread_t *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);

		thread->run(thread->mddev);
	}

	return 0;
}
void md_wakeup_thread(mdk_thread_t *thread)
{
    if (thread) {
        dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
        set_bit(THREAD_WAKEUP, &thread->flags);
        wake_up(&thread->wqueue);
    }
}
mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                                 const char *name)
{
    mdk_thread_t *thread;

    thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
    if (!thread)
        return NULL;

    init_waitqueue_head(&thread->wqueue);

    thread->run = run;
    thread->mddev = mddev;
    thread->timeout = MAX_SCHEDULE_TIMEOUT;
    thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
    if (IS_ERR(thread->tsk)) {
        kfree(thread);
        return NULL;
    }
    return thread;
}
void md_unregister_thread(mdk_thread_t *thread)
{
    dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));

    kthread_stop(thread->tsk);
    kfree(thread);
}
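/*
 * Illustrative sketch, not part of the original file: the typical
 * lifecycle of the thread API above as a personality would use it.
 * my_raid_work, my_raid_run/stop and the "%s_example" name are made up.
 */
#if 0
static void my_raid_work(mddev_t *mddev)
{
    /* process-context work: retry I/O, kick resync, update sb, ... */
}

static int my_raid_run(mddev_t *mddev)
{
    mddev->thread = md_register_thread(my_raid_work, mddev, "%s_example");
    if (!mddev->thread)
        return -ENOMEM;
    return 0;
}

static int my_raid_stop(mddev_t *mddev)
{
    md_unregister_thread(mddev->thread);    /* kthread_stop() + kfree() */
    mddev->thread = NULL;
    return 0;
}
/* md_wakeup_thread(mddev->thread) may then be called from any context,
 * including interrupt context, to make my_raid_work() run. */
#endif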
void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
    if (!mddev) {
        MD_BUG();
        return;
    }

    if (!rdev || test_bit(Faulty, &rdev->flags))
        return;

    if (mddev->external)
        set_bit(Blocked, &rdev->flags);

    dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
        mdname(mddev),
        MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
        __builtin_return_address(0),__builtin_return_address(1),
        __builtin_return_address(2),__builtin_return_address(3));

    if (!mddev->pers)
        return;
    if (!mddev->pers->error_handler)
        return;
    mddev->pers->error_handler(mddev,rdev);
    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
    set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    md_new_event_inintr(mddev);
}
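/*
 * Illustrative sketch, not part of the original file: how a
 * personality's write-completion path would report a failed member
 * through md_error().  example_end_write and the bi_private usage are
 * assumptions for the example.
 */
#if 0
static void example_end_write(struct bio *bio, int error)
{
    mdk_rdev_t *rdev = bio->bi_private;

    if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
        md_error(rdev->mddev, rdev);    /* marks the device Faulty via
                                         * ->error_handler(), kicks recovery */
    rdev_dec_pending(rdev, rdev->mddev);
    bio_put(bio);
}
#endif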
/*
 * seq_file implementation for /proc/mdstat
 */

static void status_unused(struct seq_file *seq)
{
    int i = 0;
    mdk_rdev_t *rdev;
    struct list_head *tmp;

    seq_printf(seq, "unused devices: ");

    rdev_for_each_list(rdev, tmp, pending_raid_disks) {
        char b[BDEVNAME_SIZE];
        i++;
        seq_printf(seq, "%s ",
                  bdevname(rdev->bdev,b));
    }
    if (!i)
        seq_printf(seq, "<none>");

    seq_printf(seq, "\n");
}
static void status_resync(struct seq_file *seq, mddev_t * mddev)
{
    sector_t max_blocks, resync, res;
    unsigned long dt, db, rt;
    int scale;
    unsigned int per_milli;

    resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;

    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
        max_blocks = mddev->resync_max_sectors >> 1;
    else
        max_blocks = mddev->size;

    /*
     * Should not happen.
     */
    if (!max_blocks)
        MD_BUG();
    /* Pick 'scale' such that (resync>>scale)*1000 will fit
     * in a sector_t, and (max_blocks>>scale) will fit in a
     * u32, as those are the requirements for sector_div.
     * Thus 'scale' must be at least 10
     * (a worked example follows below).
     */
    scale = 10;
    if (sizeof(sector_t) > sizeof(unsigned long)) {
        while ( max_blocks/2 > (1ULL<<(scale+32)))
            scale++;
    }
    res = (resync>>scale)*1000;
    sector_div(res, (u32)((max_blocks>>scale)+1));

    per_milli = res;
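    /*
     * Worked example (illustrative, not part of the original file):
     * take a 16TiB array where sector_t is 64-bit but unsigned long is
     * 32-bit.  max_blocks = 2^34 (1KB blocks), so max_blocks/2 = 2^33,
     * which is not > 2^(10+32), and scale stays at 10.  Halfway
     * through, resync = 2^33, so res = (2^33 >> 10) * 1000 =
     * 2^23 * 1000; dividing by (max_blocks >> 10) + 1 = 2^24 + 1
     * gives per_milli ~= 500, which prints as 50.0%.
     */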
    {
        int i, x = per_milli/50, y = 20-x;
        seq_printf(seq, "[");
        for (i = 0; i < x; i++)
            seq_printf(seq, "=");
        seq_printf(seq, ">");
        for (i = 0; i < y; i++)
            seq_printf(seq, ".");
        seq_printf(seq, "] ");
    }
    seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
               (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
                "reshape" :
                (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
                 "check" :
                 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
                  "resync" : "recovery"))),
               per_milli/10, per_milli % 10,
               (unsigned long long) resync,
               (unsigned long long) max_blocks);
    /*
     * We do not want to overflow, so the order of operands and
     * the * 100 / 100 trick are important. We do a +1 to be
     * safe against division by zero. We only estimate anyway.
     *
     * dt: time from mark until now
     * db: blocks written from mark until now
     * rt: remaining time
     */
    dt = ((jiffies - mddev->resync_mark) / HZ);
    if (!dt) dt++;
    db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
        - mddev->resync_mark_cnt;
    rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;

    seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);

    seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}
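/*
 * Worked example (illustrative, not part of the original file): with
 * dt = 30 seconds since the mark, db = 60000 sectors written (30000KB,
 * i.e. 1000K/sec) and 1000000 1KB blocks left (max_blocks - resync):
 * db/2/100+1 = 301, so rt = (30 * (1000000/301))/100 ~= 996 seconds,
 * printed as finish=16.6min and speed=1000K/sec.
 */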
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct list_head *tmp;
    loff_t l = *pos;
    mddev_t *mddev;

    if (l >= 0x10000)
        return NULL;
    if (!l--)
        /* header */
        return (void*)1;

    spin_lock(&all_mddevs_lock);
    list_for_each(tmp,&all_mddevs)
        if (!l--) {
            mddev = list_entry(tmp, mddev_t, all_mddevs);
            mddev_get(mddev);
            spin_unlock(&all_mddevs_lock);
            return mddev;
        }
    spin_unlock(&all_mddevs_lock);
    if (!l--)
        return (void*)2;/* tail */
    return NULL;
}
static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct list_head *tmp;
    mddev_t *next_mddev, *mddev = v;

    ++*pos;
    if (v == (void*)2)
        return NULL;

    spin_lock(&all_mddevs_lock);
    if (v == (void*)1)
        tmp = all_mddevs.next;
    else
        tmp = mddev->all_mddevs.next;
    if (tmp != &all_mddevs)
        next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
    else {
        next_mddev = (void*)2;
        *pos = 0x10000;
    }
    spin_unlock(&all_mddevs_lock);

    if (v != (void*)1)
        mddev_put(mddev);
    return next_mddev;
}
static void md_seq_stop(struct seq_file *seq, void *v)
{
    mddev_t *mddev = v;

    if (mddev && v != (void*)1 && v != (void*)2)
        mddev_put(mddev);
}
struct mdstat_info {
    int event;
};
static int md_seq_show(struct seq_file *seq, void *v)
{
    mddev_t *mddev = v;
    sector_t size;
    struct list_head *tmp2;
    mdk_rdev_t *rdev;
    struct mdstat_info *mi = seq->private;
    struct bitmap *bitmap;

    if (v == (void*)1) {
        struct mdk_personality *pers;
        seq_printf(seq, "Personalities : ");
        spin_lock(&pers_lock);
        list_for_each_entry(pers, &pers_list, list)
            seq_printf(seq, "[%s] ", pers->name);

        spin_unlock(&pers_lock);
        seq_printf(seq, "\n");
        mi->event = atomic_read(&md_event_count);
        return 0;
    }
    if (v == (void*)2) {
        status_unused(seq);
        return 0;
    }

    if (mddev_lock(mddev) < 0)
        return -EINTR;

    if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
        seq_printf(seq, "%s : %sactive", mdname(mddev),
                        mddev->pers ? "" : "in");
        if (mddev->pers) {
            if (mddev->ro==1)
                seq_printf(seq, " (read-only)");
            if (mddev->ro==2)
                seq_printf(seq, " (auto-read-only)");
            seq_printf(seq, " %s", mddev->pers->name);
        }
        size = 0;
        rdev_for_each(rdev, tmp2, mddev) {
            char b[BDEVNAME_SIZE];
            seq_printf(seq, " %s[%d]",
                bdevname(rdev->bdev,b), rdev->desc_nr);
            if (test_bit(WriteMostly, &rdev->flags))
                seq_printf(seq, "(W)");
            if (test_bit(Faulty, &rdev->flags)) {
                seq_printf(seq, "(F)");
                continue;
            } else if (rdev->raid_disk < 0)
                seq_printf(seq, "(S)"); /* spare */
            size += rdev->size;
        }
        if (!list_empty(&mddev->disks)) {
            if (mddev->pers)
                seq_printf(seq, "\n      %llu blocks",
                    (unsigned long long)mddev->array_size);
            else
                seq_printf(seq, "\n      %llu blocks",
                    (unsigned long long)size);
        }
        if (mddev->persistent) {
            if (mddev->major_version != 0 ||
                mddev->minor_version != 90) {
                seq_printf(seq," super %d.%d",
                       mddev->major_version,
                       mddev->minor_version);
            }
        } else if (mddev->external)
            seq_printf(seq, " super external:%s",
                   mddev->metadata_type);
        else
            seq_printf(seq, " super non-persistent");
        if (mddev->pers) {
            mddev->pers->status (seq, mddev);
            seq_printf(seq, "\n      ");
            if (mddev->pers->sync_request) {
                if (mddev->curr_resync > 2) {
                    status_resync (seq, mddev);
                    seq_printf(seq, "\n      ");
                } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
                    seq_printf(seq, "\tresync=DELAYED\n      ");
                else if (mddev->recovery_cp < MaxSector)
                    seq_printf(seq, "\tresync=PENDING\n      ");
            }
        } else
            seq_printf(seq, "\n       ");
        if ((bitmap = mddev->bitmap)) {
            unsigned long chunk_kb;
            unsigned long flags;
            spin_lock_irqsave(&bitmap->lock, flags);
            chunk_kb = bitmap->chunksize >> 10;
            seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
                "%lu%s chunk",
                bitmap->pages - bitmap->missing_pages,
                bitmap->pages,
                (bitmap->pages - bitmap->missing_pages)
                    << (PAGE_SHIFT - 10),
                chunk_kb ? chunk_kb : bitmap->chunksize,
                chunk_kb ? "KB" : "B");
            if (bitmap->file) {
                seq_printf(seq, ", file: ");
                seq_path(seq, &bitmap->file->f_path, " \t\n");
            }

            seq_printf(seq, "\n");
            spin_unlock_irqrestore(&bitmap->lock, flags);
        }

        seq_printf(seq, "\n");

        mddev_unlock(mddev);
    }
    return 0;
}
static struct seq_operations md_seq_ops = {
    .start  = md_seq_start,
    .next   = md_seq_next,
    .stop   = md_seq_stop,
    .show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
    int error;
    struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
    if (mi == NULL)
        return -ENOMEM;

    error = seq_open(file, &md_seq_ops);
    if (error)
        kfree(mi);
    else {
        struct seq_file *p = file->private_data;
        p->private = mi;
        mi->event = atomic_read(&md_event_count);
    }
    return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
    struct seq_file *m = filp->private_data;
    struct mdstat_info *mi = m->private;
    int mask;

    poll_wait(filp, &md_event_waiters, wait);

    /* always allow read */
    mask = POLLIN | POLLRDNORM;

    if (mi->event != atomic_read(&md_event_count))
        mask |= POLLERR | POLLPRI;
    return mask;
}
static const struct file_operations md_seq_fops = {
    .owner          = THIS_MODULE,
    .open           = md_seq_open,
    .read           = seq_read,
    .llseek         = seq_lseek,
    .release        = seq_release_private,
    .poll           = mdstat_poll,
};
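/*
 * Illustrative sketch, not part of the original file: mdstat_poll above
 * is what lets a userspace monitor (mdadm --monitor works this way)
 * sleep on /proc/mdstat and wake on md_new_event().  A minimal consumer:
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static void watch_mdstat(void)
{
    char buf[4096];
    struct pollfd pfd;

    pfd.fd = open("/proc/mdstat", O_RDONLY);
    pfd.events = POLLPRI;
    while (pfd.fd >= 0) {
        /* (re)read to arm the event counter, then block until the
         * kernel flags POLLPRI on the next array event */
        pread(pfd.fd, buf, sizeof(buf), 0);
        poll(&pfd, 1, -1);
    }
}
#endif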
int register_md_personality(struct mdk_personality *p)
{
    spin_lock(&pers_lock);
    list_add_tail(&p->list, &pers_list);
    printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
    spin_unlock(&pers_lock);
    return 0;
}
int unregister_md_personality(struct mdk_personality *p)
{
    printk(KERN_INFO "md: %s personality unregistered\n", p->name);
    spin_lock(&pers_lock);
    list_del_init(&p->list);
    spin_unlock(&pers_lock);
    return 0;
}
static int is_mddev_idle(mddev_t *mddev)
{
    mdk_rdev_t * rdev;
    struct list_head *tmp;
    int idle;
    long curr_events;

    idle = 1;
    rdev_for_each(rdev, tmp, mddev) {
        struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
        curr_events = disk_stat_read(disk, sectors[0]) +
                disk_stat_read(disk, sectors[1]) -
                atomic_read(&disk->sync_io);
        /* sync IO will cause sync_io to increase before the disk_stats
         * as sync_io is counted when a request starts, and
         * disk_stats is counted when it completes.
         * So resync activity will cause curr_events to be smaller than
         * when there was no such activity.
         * non-sync IO will cause disk_stat to increase without
         * increasing sync_io so curr_events will (eventually)
         * be larger than it was before.  Once it becomes
         * substantially larger, the test below will cause
         * the array to appear non-idle, and resync will slow
         * down.
         * If there is a lot of outstanding resync activity when
         * we set last_event to curr_events, then all that activity
         * completing might cause the array to appear non-idle
         * and resync will be slowed down even though there might
         * not have been non-resync activity.  This will only
         * happen once though.  'last_events' will soon reflect
         * the state where there is little or no outstanding
         * resync requests, and further resync activity will
         * always make curr_events less than last_events.
         */
        if (curr_events - rdev->last_events > 4096) {
            rdev->last_events = curr_events;
            idle = 0;
        }
    }
    return idle;
}
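/*
 * Worked example (illustrative, not part of the original file): say a
 * member's last_events is 50000.  A resync burst adds 8000 sectors to
 * the disk stats and 8000 to sync_io, so curr_events is unchanged and
 * the array still looks idle.  A burst of ordinary I/O adds 5000
 * sectors to the stats only; curr_events becomes 55000, the delta of
 * 5000 exceeds the 4096 threshold, last_events is reset and the array
 * is reported non-idle, throttling the resync.
 */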
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
    /* another "blocks" (512byte) blocks have been synced */
    atomic_sub(blocks, &mddev->recovery_active);
    wake_up(&mddev->recovery_wait);
    if (!ok) {
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        // stop recovery, signal do_sync ....
    }
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
    if (bio_data_dir(bi) != WRITE)
        return;

    BUG_ON(mddev->ro == 1);
    if (mddev->ro == 2) {
        /* need to switch to read/write */
        mddev->ro = 0;
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread);
    }
    atomic_inc(&mddev->writes_pending);
    if (mddev->safemode == 1)
        mddev->safemode = 0;
    if (mddev->in_sync) {
        spin_lock_irq(&mddev->write_lock);
        if (mddev->in_sync) {
            mddev->in_sync = 0;
            set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            md_wakeup_thread(mddev->thread);
        }
        spin_unlock_irq(&mddev->write_lock);
        sysfs_notify(&mddev->kobj, NULL, "array_state");
    }
    wait_event(mddev->sb_wait,
           !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
           !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
void md_write_end(mddev_t *mddev)
{
    if (atomic_dec_and_test(&mddev->writes_pending)) {
        if (mddev->safemode == 2)
            md_wakeup_thread(mddev->thread);
        else if (mddev->safemode_delay)
            mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
    }
}
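/*
 * Illustrative sketch, not part of the original file: a personality's
 * request path brackets writes between md_write_start() and
 * md_write_end() so writes_pending and the superblock 'active' state
 * stay in step.  Simplified - real personalities defer md_write_end()
 * to write completion.  example_make_request is a made-up name.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
    mddev_t *mddev = q->queuedata;

    md_write_start(mddev, bio); /* may block until sb is marked 'active' */

    /* ... map and submit the bio to member devices here ... */

    if (bio_data_dir(bio) == WRITE)
        md_write_end(mddev);    /* balances writes_pending */
    return 0;
}
#endif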
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(mddev_t *mddev)
{
    if (!mddev->pers)
        return;
    if (mddev->ro)
        return;
    if (!mddev->pers->sync_request)
        return;

    spin_lock_irq(&mddev->write_lock);
    if (mddev->in_sync) {
        mddev->in_sync = 0;
        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
        if (mddev->safemode_delay &&
            mddev->safemode == 0)
            mddev->safemode = 1;
        spin_unlock_irq(&mddev->write_lock);
        md_update_sb(mddev, 0);
        sysfs_notify(&mddev->kobj, NULL, "array_state");
        /* wait for the dirty state to be recorded in the metadata */
        wait_event(mddev->sb_wait,
               !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
               !test_bit(MD_CHANGE_PENDING, &mddev->flags));
    } else
        spin_unlock_irq(&mddev->write_lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);
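/*
 * Illustrative sketch, not part of the original file: the scenario the
 * comment above md_allow_write() warns about.  example_grow is a
 * made-up name; the point is calling md_allow_write() before a
 * GFP_KERNEL allocation made while holding the mddev lock.
 */
#if 0
static int example_grow(mddev_t *mddev, int new_disks)
{
    void **newconf;

    md_allow_write(mddev); /* mark the array active first, so the
                            * allocation cannot deadlock waiting on
                            * writeback to our own (locked) array */
    newconf = kzalloc(new_disks * sizeof(void *), GFP_KERNEL);
    if (!newconf)
        return -ENOMEM;
    /* ... swap in the new configuration ... */
    kfree(newconf);
    return 0;
}
#endif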
#define SYNC_MARKS      10
#define SYNC_MARK_STEP  (3*HZ)
void md_do_sync(mddev_t *mddev)
{
    mddev_t *mddev2;
    unsigned int currspeed = 0,
         window;
    sector_t max_sectors,j, io_sectors;
    unsigned long mark[SYNC_MARKS];
    sector_t mark_cnt[SYNC_MARKS];
    int last_mark,m;
    struct list_head *tmp;
    sector_t last_check;
    int skipped = 0;
    struct list_head *rtmp;
    mdk_rdev_t *rdev;
    char *desc;

    /* just in case thread restarts... */
    if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
        return;
    if (mddev->ro) /* never try to sync a read-only array */
        return;

    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
            desc = "data-check";
        else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
            desc = "requested-resync";
        else
            desc = "resync";
    } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        desc = "reshape";
    else
        desc = "recovery";

    /* we overload curr_resync somewhat here.
     * 0 == not engaged in resync at all
     * 2 == checking that there is no conflict with another sync
     * 1 == like 2, but have yielded to allow conflicting resync to
     *      commence
     * other == active in resync - this many blocks
     *
     * Before starting a resync we must have set curr_resync to
     * 2, and then checked that every "conflicting" array has curr_resync
     * less than ours.  When we find one that is the same or higher
     * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
     * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
     * This will mean we have to start checking from the beginning again.
     */
    do {
        mddev->curr_resync = 2;

    try_again:
        if (kthread_should_stop()) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            goto skip;
        }
        for_each_mddev(mddev2, tmp) {
            if (mddev2 == mddev)
                continue;
            if (!mddev->parallel_resync
            &&  mddev2->curr_resync
            &&  match_mddev_units(mddev, mddev2)) {
                DEFINE_WAIT(wq);
                if (mddev < mddev2 && mddev->curr_resync == 2) {
                    /* arbitrarily yield */
                    mddev->curr_resync = 1;
                    wake_up(&resync_wait);
                }
                if (mddev > mddev2 && mddev->curr_resync == 1)
                    /* no need to wait here, we can wait the next
                     * time 'round when curr_resync == 2
                     */
                    continue;
                prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
                if (!kthread_should_stop() &&
                    mddev2->curr_resync >= mddev->curr_resync) {
                    printk(KERN_INFO "md: delaying %s of %s"
                           " until %s has finished (they"
                           " share one or more physical units)\n",
                           desc, mdname(mddev), mdname(mddev2));
                    mddev_put(mddev2);
                    schedule();
                    finish_wait(&resync_wait, &wq);
                    goto try_again;
                }
                finish_wait(&resync_wait, &wq);
            }
        }
    } while (mddev->curr_resync < 2);
    j = 0;
    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        /* resync follows the size requested by the personality,
         * which defaults to physical size, but can be virtual size
         */
        max_sectors = mddev->resync_max_sectors;
        mddev->resync_mismatches = 0;
        /* we don't use the checkpoint if there's a bitmap */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
            j = mddev->resync_min;
        else if (!mddev->bitmap)
            j = mddev->recovery_cp;

    } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        max_sectors = mddev->size << 1;
    else {
        /* recovery follows the physical size of devices */
        max_sectors = mddev->size << 1;
        j = MaxSector;
        rdev_for_each(rdev, rtmp, mddev)
            if (rdev->raid_disk >= 0 &&
                !test_bit(Faulty, &rdev->flags) &&
                !test_bit(In_sync, &rdev->flags) &&
                rdev->recovery_offset < j)
                j = rdev->recovery_offset;
    }
    printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
    printk(KERN_INFO "md: minimum _guaranteed_  speed:"
        " %d KB/sec/disk.\n", speed_min(mddev));
    printk(KERN_INFO "md: using maximum available idle IO bandwidth "
           "(but not more than %d KB/sec) for %s.\n",
           speed_max(mddev), desc);

    is_mddev_idle(mddev); /* this also initializes IO event counters */
    io_sectors = 0;
    for (m = 0; m < SYNC_MARKS; m++) {
        mark[m] = jiffies;
        mark_cnt[m] = io_sectors;
    }
    last_mark = 0;
    mddev->resync_mark = mark[last_mark];
    mddev->resync_mark_cnt = mark_cnt[last_mark];

    /*
     * Tune reconstruction:
     */
    window = 32*(PAGE_SIZE/512);
    printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
        window/2,(unsigned long long) max_sectors/2);

    atomic_set(&mddev->recovery_active, 0);
    last_check = 0;

    if (j>2) {
        printk(KERN_INFO
            "md: resuming %s of %s from checkpoint.\n",
            desc, mdname(mddev));
        mddev->curr_resync = j;
    }
    while (j < max_sectors) {
        sector_t sectors;

        skipped = 0;
        if (j >= mddev->resync_max) {
            sysfs_notify(&mddev->kobj, NULL, "sync_completed");
            wait_event(mddev->recovery_wait,
                   mddev->resync_max > j
                   || kthread_should_stop());
        }
        if (kthread_should_stop())
            goto interrupted;
        sectors = mddev->pers->sync_request(mddev, j, &skipped,
                          currspeed < speed_min(mddev));
        if (sectors == 0) {
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);
            goto out;
        }

        if (!skipped) { /* actual IO requested */
            io_sectors += sectors;
            atomic_add(sectors, &mddev->recovery_active);
        }
        j += sectors;
        if (j>1) mddev->curr_resync = j;
        mddev->curr_mark_cnt = io_sectors;
        if (last_check == 0)
            /* this is the earliest that rebuild will be
             * visible in /proc/mdstat
             */
            md_new_event(mddev);

        if (last_check + window > io_sectors || j == max_sectors)
            continue;

        last_check = io_sectors;

        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
            break;
    repeat:
        if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
            /* step marks */
            int next = (last_mark+1) % SYNC_MARKS;

            mddev->resync_mark = mark[next];
            mddev->resync_mark_cnt = mark_cnt[next];
            mark[next] = jiffies;
            mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
            last_mark = next;
        }
        if (kthread_should_stop())
            goto interrupted;

        /*
         * this loop exits only when we are slower than
         * the 'hard' speed limit, or the system was IO-idle for
         * a jiffy.
         * the system might be non-idle CPU-wise, but we only care
         * about not overloading the IO subsystem. (things like an
         * e2fsck being done on the RAID array should execute fast)
         */
        blk_unplug(mddev->queue);
        cond_resched();

        currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
            /((jiffies-mddev->resync_mark)/HZ +1) +1;

        if (currspeed > speed_min(mddev)) {
            if ((currspeed > speed_max(mddev)) ||
                    !is_mddev_idle(mddev)) {
                msleep(500);
                goto repeat;
            }
        }
    }
    printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
    /*
     * this also signals 'finished resyncing' to md_stop
     */
 out:
    blk_unplug(mddev->queue);

    wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

    /* tell personality that we are finished */
    mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
    if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
        mddev->curr_resync > 2) {
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
            if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                if (mddev->curr_resync >= mddev->recovery_cp) {
                    printk(KERN_INFO
                           "md: checkpointing %s of %s.\n",
                           desc, mdname(mddev));
                    mddev->recovery_cp = mddev->curr_resync;
                }
            } else
                mddev->recovery_cp = MaxSector;
        } else {
            if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                mddev->curr_resync = MaxSector;
            rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags) &&
                    !test_bit(In_sync, &rdev->flags) &&
                    rdev->recovery_offset < mddev->curr_resync)
                    rdev->recovery_offset = mddev->curr_resync;
        }
    }
    set_bit(MD_CHANGE_DEVS, &mddev->flags);
 skip:
    mddev->curr_resync = 0;
    mddev->resync_min = 0;
    mddev->resync_max = MaxSector;
    sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    wake_up(&resync_wait);
    set_bit(MD_RECOVERY_DONE, &mddev->recovery);
    md_wakeup_thread(mddev->thread);
    return;

 interrupted:
    /*
     * got a signal, exit.
     */
    printk(KERN_INFO
           "md: md_do_sync() got signal ... exiting\n");
    set_bit(MD_RECOVERY_INTR, &mddev->recovery);
    goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
    mdk_rdev_t *rdev;
    struct list_head *rtmp;
    int spares = 0;

    rdev_for_each(rdev, rtmp, mddev)
        if (rdev->raid_disk >= 0 &&
            !test_bit(Blocked, &rdev->flags) &&
            (test_bit(Faulty, &rdev->flags) ||
             ! test_bit(In_sync, &rdev->flags)) &&
            atomic_read(&rdev->nr_pending)==0) {
            if (mddev->pers->hot_remove_disk(
                    mddev, rdev->raid_disk)==0) {
                char nm[20];
                sprintf(nm,"rd%d", rdev->raid_disk);
                sysfs_remove_link(&mddev->kobj, nm);
                rdev->raid_disk = -1;
            }
        }

    if (mddev->degraded) {
        rdev_for_each(rdev, rtmp, mddev) {
            if (rdev->raid_disk >= 0 &&
                !test_bit(In_sync, &rdev->flags))
                spares++;
            if (rdev->raid_disk < 0
                && !test_bit(Faulty, &rdev->flags)) {
                rdev->recovery_offset = 0;
                if (mddev->pers->hot_add_disk(mddev,rdev)) {
                    char nm[20];
                    sprintf(nm, "rd%d", rdev->raid_disk);
                    if (sysfs_create_link(&mddev->kobj,
                                  &rdev->kobj, nm))
                        printk(KERN_WARNING
                               "md: cannot register "
                               "%s for %s\n",
                               nm, mdname(mddev));
                    spares++;
                    md_new_event(mddev);
                } else
                    break;
            }
        }
    }
    return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
    mdk_rdev_t *rdev;
    struct list_head *rtmp;

    if (mddev->bitmap)
        bitmap_daemon_work(mddev->bitmap);

    if (mddev->ro)
        return;

    if (signal_pending(current)) {
        if (mddev->pers->sync_request && !mddev->external) {
            printk(KERN_INFO "md: %s in immediate safe mode\n",
                   mdname(mddev));
            mddev->safemode = 2;
        }
        flush_signals(current);
    }

    if ( ! (
        (mddev->flags && !mddev->external) ||
        test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
        test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
        (mddev->external == 0 && mddev->safemode == 1) ||
        (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
         && !mddev->in_sync && mddev->recovery_cp == MaxSector)
        ))
        return;
    if (mddev_trylock(mddev)) {
        int spares = 0;

        if (!mddev->external) {
            spin_lock_irq(&mddev->write_lock);
            if (mddev->safemode &&
                !atomic_read(&mddev->writes_pending) &&
                !mddev->in_sync &&
                mddev->recovery_cp == MaxSector) {
                mddev->in_sync = 1;
                if (mddev->persistent)
                    set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            }
            if (mddev->safemode == 1)
                mddev->safemode = 0;
            spin_unlock_irq(&mddev->write_lock);
        }

        if (mddev->flags)
            md_update_sb(mddev, 0);
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
            /* resync/recovery still happening */
            clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            goto unlock;
        }
        if (mddev->sync_thread) {
            /* resync has finished, collect result */
            md_unregister_thread(mddev->sync_thread);
            mddev->sync_thread = NULL;
            if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                /* success... activate any spares */
                mddev->pers->spare_active(mddev);
            }
            md_update_sb(mddev, 1);

            /* if array is no-longer degraded, then any saved_raid_disk
             * information must be scrapped
             */
            if (!mddev->degraded)
                rdev_for_each(rdev, rtmp, mddev)
                    rdev->saved_raid_disk = -1;

            mddev->recovery = 0;
            /* flag recovery needed just to double check */
            set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
            md_new_event(mddev);
            goto unlock;
        }
        /* Clear some bits that don't mean anything, but
         * might be left set
         */
        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

        if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
            goto unlock;
        /* no recovery is running.
         * remove any failed drives, then
         * add spares if possible.
         * Spares are also removed and re-added, to allow
         * the personality to fail the re-add.
         */
        if (mddev->reshape_position != MaxSector) {
            if (mddev->pers->check_reshape(mddev) != 0)
                /* Cannot proceed */
                goto unlock;
            set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        } else if ((spares = remove_and_add_spares(mddev))) {
            clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
            clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        } else if (mddev->recovery_cp < MaxSector) {
            set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
            /* nothing to be done ... */
            goto unlock;
        if (mddev->pers->sync_request) {
            set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
            if (spares && mddev->bitmap && ! mddev->bitmap->file) {
                /* We are adding a device or devices to an array
                 * which has the bitmap stored on all devices.
                 * So make sure all bitmap pages get written
                 */
                bitmap_write_all(mddev->bitmap);
            }
            mddev->sync_thread = md_register_thread(md_do_sync,
                                mddev,
                                "%s_resync");
            if (!mddev->sync_thread) {
                printk(KERN_ERR "%s: could not start resync"
                    " thread...\n",
                    mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                mddev->recovery = 0;
            } else
                md_wakeup_thread(mddev->sync_thread);
            md_new_event(mddev);
        }
    unlock:
        mddev_unlock(mddev);
    }
}
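/*
 * Illustrative sketch, not part of the original file: every personality
 * thread routine ends by handing generic work back to this function,
 * as the comment above describes.  my_raid_work is a made-up name.
 */
#if 0
static void my_raid_work(mddev_t *mddev)
{
    /* personality-specific work: retry reads, flush stripes, ... */

    md_check_recovery(mddev);   /* superblock updates, spare handling,
                                 * starting/reaping the sync thread */
}
#endif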
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
    sysfs_notify(&rdev->kobj, NULL, "state");
    wait_event_timeout(rdev->blocked_wait,
               !test_bit(Blocked, &rdev->flags),
               msecs_to_jiffies(5000));
    rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
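/*
 * Illustrative sketch, not part of the original file: a personality
 * that hits a write error while the device is Blocked waits for
 * userspace to acknowledge before retrying.  example_handle_error is a
 * made-up name.
 */
#if 0
static void example_handle_error(mddev_t *mddev, mdk_rdev_t *rdev)
{
    atomic_inc(&rdev->nr_pending);  /* md_wait_for_blocked_rdev() drops
                                     * this via rdev_dec_pending() */
    md_wait_for_blocked_rdev(rdev, mddev);
    /* re-issue or fail the I/O depending on rdev->flags here */
}
#endif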
static int md_notify_reboot(struct notifier_block *this,
                unsigned long code, void *x)
{
    struct list_head *tmp;
    mddev_t *mddev;

    if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

        printk(KERN_INFO "md: stopping all md devices.\n");

        for_each_mddev(mddev, tmp)
            if (mddev_trylock(mddev)) {
                do_md_stop (mddev, 1);
                mddev_unlock(mddev);
            }
        /*
         * certain more exotic SCSI devices are known to be
         * volatile wrt too early system reboots. While the
         * right place to handle this issue is the given
         * driver, we do want to have a safe RAID driver ...
         */
        mdelay(1000*1);
    }
    return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
    .notifier_call  = md_notify_reboot,
    .next           = NULL,
    .priority       = INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
    dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

    proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
    if (register_blkdev(MAJOR_NR, "md"))
        return -1;
    if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
        unregister_blkdev(MAJOR_NR, "md");
        return -1;
    }
    blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
                md_probe, NULL, NULL);
    blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
                md_probe, NULL, NULL);

    register_reboot_notifier(&md_notifier);
    raid_table_header = register_sysctl_table(raid_root_table);

    md_geninit();
    return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
    struct list_head list;
    dev_t dev;
};
void md_autodetect_dev(dev_t dev)
{
    struct detected_devices_node *node_detected_dev;

    node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
    if (node_detected_dev) {
        node_detected_dev->dev = dev;
        list_add_tail(&node_detected_dev->list, &all_detected_devices);
    } else {
        printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
            ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
    }
}
static void autostart_arrays(int part)
{
    mdk_rdev_t *rdev;
    struct detected_devices_node *node_detected_dev;
    dev_t dev;
    int i_scanned, i_passed;

    i_scanned = 0;
    i_passed = 0;

    printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

    while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
        i_scanned++;
        node_detected_dev = list_entry(all_detected_devices.next,
                    struct detected_devices_node, list);
        list_del(&node_detected_dev->list);
        dev = node_detected_dev->dev;
        kfree(node_detected_dev);
        rdev = md_import_device(dev,0, 90);
        if (IS_ERR(rdev))
            continue;

        if (test_bit(Faulty, &rdev->flags)) {
            MD_BUG();
            continue;
        }
        set_bit(AutoDetected, &rdev->flags);
        list_add(&rdev->same_set, &pending_raid_disks);
        i_passed++;
    }

    printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
                        i_scanned, i_passed);

    autorun_devices(part);
}
#endif /* !MODULE */
static __exit void md_exit(void)
{
    mddev_t *mddev;
    struct list_head *tmp;

    blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
    blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

    unregister_blkdev(MAJOR_NR,"md");
    unregister_blkdev(mdp_major, "mdp");
    unregister_reboot_notifier(&md_notifier);
    unregister_sysctl_table(raid_table_header);
    remove_proc_entry("mdstat", NULL);
    for_each_mddev(mddev, tmp) {
        struct gendisk *disk = mddev->gendisk;
        if (!disk)
            continue;
        export_array(mddev);
        del_gendisk(disk);
        put_disk(disk);
        mddev->gendisk = NULL;
        mddev_put(mddev);
    }
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
    return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
    char *e;
    int num = simple_strtoul(val, &e, 10);
    if (*val && (*e == '\0' || *e == '\n')) {
        start_readonly = num;
        return 0;
    }
    return -EINVAL;
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);