/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES	253 /* md-raid kernel limit */

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10 /* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Flags for rs->ctr_flags field.
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define CTR_FLAG_SYNC			0x1	/* 1 */ /* Not with raid0! */
#define CTR_FLAG_NOSYNC			0x2	/* 1 */ /* Not with raid0! */
#define CTR_FLAG_REBUILD		0x4	/* 2 */ /* Not with raid0! */
#define CTR_FLAG_DAEMON_SLEEP		0x8	/* 2 */ /* Not with raid0! */
#define CTR_FLAG_MIN_RECOVERY_RATE	0x10	/* 2 */ /* Not with raid0! */
#define CTR_FLAG_MAX_RECOVERY_RATE	0x20	/* 2 */ /* Not with raid0! */
#define CTR_FLAG_MAX_WRITE_BEHIND	0x40	/* 2 */ /* Only with raid1! */
#define CTR_FLAG_WRITE_MOSTLY		0x80	/* 2 */ /* Only with raid1! */
#define CTR_FLAG_STRIPE_CACHE		0x100	/* 2 */ /* Only with raid4/5/6! */
#define CTR_FLAG_REGION_SIZE		0x200	/* 2 */ /* Not with raid0! */
#define CTR_FLAG_RAID10_COPIES		0x400	/* 2 */ /* Only with raid10 */
#define CTR_FLAG_RAID10_FORMAT		0x800	/* 2 */ /* Only with raid10 */
/* New for v1.8.0 */
#define CTR_FLAG_DELTA_DISKS		0x1000	/* 2 */ /* Only with reshapable raid4/5/6/10! */
#define CTR_FLAG_DATA_OFFSET		0x2000	/* 2 */ /* Only with reshapable raid4/5/6/10! */
#define CTR_FLAG_RAID10_USE_NEAR_SETS	0x4000	/* 2 */ /* Only with raid10! */

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */
/* Define all any sync flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG	(CTR_FLAG_REBUILD | \
					 CTR_FLAG_WRITE_MOSTLY | \
					 CTR_FLAG_DAEMON_SLEEP | \
					 CTR_FLAG_MIN_RECOVERY_RATE | \
					 CTR_FLAG_MAX_RECOVERY_RATE | \
					 CTR_FLAG_MAX_WRITE_BEHIND | \
					 CTR_FLAG_STRIPE_CACHE | \
					 CTR_FLAG_REGION_SIZE | \
					 CTR_FLAG_RAID10_COPIES | \
					 CTR_FLAG_RAID10_FORMAT | \
					 CTR_FLAG_DELTA_DISKS | \
					 CTR_FLAG_DATA_OFFSET)

/* All ctr optional arguments */
#define ALL_CTR_FLAGS		(CTR_FLAG_OPTIONS_NO_ARGS | \
				 CTR_FLAG_OPTIONS_ONE_ARG)

/*
 * All flags which cause a recovery unfreeze once they got stored in the raid metadata
 */
#define ALL_FREEZE_FLAGS	(ALL_CTR_FLAGS & ~(CTR_FLAG_REGION_SIZE | CTR_FLAGS_ANY_SYNC | \
						   CTR_FLAG_RAID10_FORMAT | CTR_FLAG_RAID10_COPIES | \
						   CTR_FLAG_RAID10_USE_NEAR_SETS))

/* Invalid options definitions per raid level... */

/* "raid0" does not accept any options */
#define RAID0_INVALID_FLAGS	ALL_CTR_FLAGS

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_INVALID_FLAGS	(CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_INVALID_FLAGS	(CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_STRIPE_CACHE)
/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_INVALID_FLAGS	(CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)
#define RAID6_INVALID_FLAGS	(CTR_FLAG_NOSYNC | RAID45_INVALID_FLAGS)
/* ...invalid options definitions per raid level */

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED		0x1
#define RT_FLAG_RS_RESUMED		0x2
#define RT_FLAG_RS_BITMAP_LOADED	0x4
#define RT_FLAG_UPDATE_SBS		0x8

/* Array elements of 64 bit needed for rebuild/write_mostly bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
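
/*
 * e.g. with MAX_RAID_DEVICES = 253 and 64-bit words this evaluates to
 * (253 + 63) / 8 / 8 = 4 array elements, i.e. 4 * 64 = 256 bits -- enough
 * to track one rebuild/write_mostly bit per possible raid device.
 */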

/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t ctr_flags;
	uint32_t runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;
	struct rs_layout rs_layout;

	struct raid_dev dev[0];
};

/* Backup/restore raid set configuration helpers */
static void _rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_backup(struct raid_set *rs)
{
	return _rs_config_backup(rs, &rs->rs_layout);
}

static void _rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs)
{
	return _rs_config_restore(rs, &rs->rs_layout);
}
/* END: backup/restore raid set configuration helpers */

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",         "raid0 (striping)",                       0, 2, 0,  0 /* NONE */},
	{"raid1",         "raid1 (mirroring)",                      0, 2, 1,  0 /* NONE */},
	{"raid10_far",    "raid10 far (striped mirrors)",           0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)",        0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near",   "raid10 near (striped mirrors)",          0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10",        "raid10 (striped mirrors)",               0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4",         "raid4 (dedicated last parity disk)",     1, 2, 4,  ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
	{"raid5_n",       "raid5 (dedicated last parity disk)",     1, 2, 5,  ALGORITHM_PARITY_N},
	{"raid5_ls",      "raid5 (left symmetric)",                 1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs",      "raid5 (right symmetric)",                1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la",      "raid5 (left asymmetric)",                1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra",      "raid5 (right asymmetric)",               1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr",      "raid6 (zero restart)",                   2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr",      "raid6 (N restart)",                      2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc",      "raid6 (N continue)",                     2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6",     "raid6 (dedicated parity/Q n/6)",         2, 4, 6,  ALGORITHM_PARITY_N_6},
	{"raid6_ls_6",    "raid6 (left symmetric dedicated Q 6)",   2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6",    "raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6",    "raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6",    "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool _in_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* ctr flag bit manipulation... */
/* Set single @flag in @flags */
static void _set_flag(uint32_t flag, uint32_t *flags)
{
	WARN_ON_ONCE(hweight32(flag) != 1);
	*flags |= flag;
}

/* Clear single @flag in @flags */
static void _clear_flag(uint32_t flag, uint32_t *flags)
{
	WARN_ON_ONCE(hweight32(flag) != 1);
	*flags &= ~flag;
}

/* Test single @flag in @flags */
static bool _test_flag(uint32_t flag, uint32_t flags)
{
	WARN_ON_ONCE(hweight32(flag) != 1);
	return (flag & flags) ? true : false;
}

/* Test multiple @flags in @all_flags */
static bool _test_flags(uint32_t flags, uint32_t all_flags)
{
	return (flags & all_flags) ? true : false;
}

/* Clear (multiple) @flags in @all_flags */
static void _clear_flags(uint32_t flags, uint32_t *all_flags)
{
	*all_flags &= ~flags;
}

/* Return true if single @flag is set in @*flags, else set it and return false */
static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
{
	if (_test_flag(flag, *flags))
		return true;

	_set_flag(flag, flags);
	return false;
}

/* Return true if single @flag is set in @*flags and clear it, else return false */
static bool _test_and_clear_flag(uint32_t flag, uint32_t *flags)
{
	if (_test_flag(flag, *flags)) {
		_clear_flag(flag, flags);
		return true;
	}

	return false;
}
/* ...ctr and runtime flag bit manipulation */
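
/*
 * e.g. parse_raid_params() below uses _test_and_set_flag() to reject
 * duplicate optional arguments on the table line:
 *
 *	if (_test_and_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags))
 *		return ti_error_einval(rs->ti, "Only one 'nosync' argument allowed");
 */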

/* All table line arguments are defined here */
static struct arg_name_flag {
	const uint32_t flag;
	const char *name;
} _arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "writemostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
};

/* Return argument name string for given @flag */
static const char *_argname_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags);

		while (anf-- > _arg_name_flags)
			if (_test_flag(flag, anf->flag))
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

/*
 * bool helpers to test for various raid levels of a raid set,
 * i.e. its level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/*
 * bool helpers to test for various raid levels of a raid type
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return _in_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return _in_range(rt->level, 4, 6);
}
/* END: raid level bools */

/*
 * Convenience functions to set ti->error to @errmsg and
 * return @r in order to shorten code in a lot of places
 */
static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
{
	ti->error = (char *) errmsg;
	return r;
}

static int ti_error_einval(struct dm_target *ti, const char *errmsg)
{
	return ti_error_ret(ti, errmsg, -EINVAL);
}
/* END: convenience functions to set ti->error to @errmsg... */

/* Return invalid ctr flags for the raid level of @rs */
static uint32_t _invalid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_INVALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_INVALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_INVALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_INVALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_INVALID_FLAGS;

	return ~0;
}

/*
 * Check for any invalid flags set on @rs defined by bitset @invalid_flags
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_invalid_flags(struct raid_set *rs)
{
	if (_test_flags(rs->ctr_flags, _invalid_flags(rs)))
		return ti_error_einval(rs->ti, "Invalid flag combined");

	return 0;
}


/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int _raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int _raid10_far_copies(int layout)
{
	return _raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static unsigned int _is_raid10_offset(int layout)
{
	return layout & RAID10_OFFSET;
}

/* Return true if md raid10 near for @layout */
static unsigned int _is_raid10_near(int layout)
{
	return !_is_raid10_offset(layout) && _raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static unsigned int _is_raid10_far(int layout)
{
	return !_is_raid10_offset(layout) && _raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (_is_raid10_offset(layout))
		return "offset";

	if (_raid10_near_copies(layout) > 1)
		return "near";

	WARN_ON(_raid10_far_copies(layout) < 2);

	return "far";
}

/* Return md raid10 algorithm for @name */
static const int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}


/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return _raid10_near_copies(layout) > 1 ?
	       _raid10_near_copies(layout) : _raid10_far_copies(layout);
}

/* Return md raid10 layout for @algorithm and number of @copies */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> disable it for now in the definition above
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!_test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		r = !RAID10_OFFSET;
		if (!_test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
/* END: MD raid10 bit definitions and helpers */
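
/*
 * e.g. raid10_format_to_md_layout() above yields, for 2 copies and without
 * "raid10_use_near_sets":
 *
 *	near:   0x102   (near_copies = 2, far_copies = 1)
 *	offset: 0x50201 (RAID10_USE_FAR_SETS | RAID10_OFFSET, far_copies = 2)
 *	far:    0x40201 (RAID10_USE_FAR_SETS, far_copies = 2)
 *
 * raid10_md_layout_to_copies() reports 2 for each of these layouts.
 */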

/* Check for any of the raid10 algorithms */
static int _got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return _is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return _is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return _is_raid10_far(layout);
		default:
			break;
		}
	}

	return 0;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (_got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->delta_disks = 0;
}


static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs)
		return ERR_PTR(ti_error_einval(ti, "Insufficient number of devices"));

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs)
		return ERR_PTR(ti_error_ret(ti, "Cannot allocate raid context", -ENOMEM));

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

static void context_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
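/*
 * e.g. a three device set with a dedicated metadata device per leg might be
 * handed in as (device names are illustrative):
 *
 *	/dev/sda1 /dev/sda2  /dev/sdb1 /dev/sdb2  /dev/sdc1 /dev/sdc2
 *
 * while a set without metadata devices would use:
 *
 *	- /dev/sda2  - /dev/sdb2  - /dev/sdc2
 */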
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Put off the number of raid devices argument to get to dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->md.raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg,
					  dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r)
				return ti_error_ret(rs->ti, "RAID metadata device lookup failure", r);

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return ti_error_ret(rs->ti, "Failed to allocate superblock page", -ENOMEM);
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset))
				return ti_error_einval(rs->ti, "Drive designated for rebuild not specified");

			if (rs->dev[i].meta_dev)
				return ti_error_einval(rs->ti, "No data device supplied with metadata device");

			continue;
		}

		r = dm_get_device(rs->ti, arg,
				  dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r)
			return ti_error_ret(rs->ti, "RAID device lookup failure", r);

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not. Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		return ti_error_einval(rs->ti, "Unable to rebuild drive while array is not in-sync");
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len)
			return ti_error_einval(rs->ti, "Supplied region size is too large");

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			return ti_error_einval(rs->ti, "Supplied region size is too small");
		}

		if (!is_power_of_2(region_size))
			return ti_error_einval(rs->ti, "Region size is not a power of 2");

		if (region_size < rs->md.chunk_sectors)
			return ti_error_einval(rs->ti, "Region size is smaller than the chunk size");
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
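
/*
 * e.g. for a 1 TiB target (2^31 sectors), min_region_size is
 * 2^31 / 2^21 = 1024 sectors; that is below the 2^13 sector (4MiB)
 * threshold, so the 4MiB default region size is chosen.
 */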

/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g. dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
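
/*
 * e.g. a raid6 set (parity_devs = 2) tolerates at most two devices that are
 * missing or out of sync, while a 2-copy raid10_near set tolerates at most
 * one rebuilding device per mirror group.
 */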

/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
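/*
 * e.g. a dm table line for a 1 GiB three-leg raid1 set without metadata
 * devices and an explicit 512 KiB region size might look like
 * (device names are illustrative):
 *
 *	0 2097152 raid raid1 3 0 region_size 1024 3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1
 */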
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned num_raid_params)
{
	int raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtouint(arg, 10, &value) < 0)
		return ti_error_einval(rs->ti, "Bad numerical argument given for chunk_size");

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value))
		return ti_error_einval(rs->ti, "Chunk size must be a power of 2");
	else if (value < 8)
		return ti_error_einval(rs->ti, "Chunk size value is too small");

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key)
			return ti_error_einval(rs->ti, "Not enough raid parameters given");

		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_NOSYNC))) {
			if (_test_and_set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one 'nosync' argument allowed");
			rs->md.recovery_cp = MaxSector;
			continue;
		}
		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_SYNC))) {
			if (_test_and_set_flag(CTR_FLAG_SYNC, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one 'sync' argument allowed");
			rs->md.recovery_cp = 0;
			continue;
		}
		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (_test_and_set_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one 'raid10_use_new_sets' argument allowed");
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg)
			return ti_error_einval(rs->ti, "Wrong number of raid parameters given");

		/*
		 * Parameters that take a string value are checked here.
		 */

		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one 'raid10_format' argument pair allowed");
			if (!rt_is_raid10(rt))
				return ti_error_einval(rs->ti, "'raid10_format' is an invalid parameter for this RAID type");
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0)
				return ti_error_ret(rs->ti, "Invalid 'raid10_format' value given", raid10_format);
			continue;
		}

		if (kstrtouint(arg, 10, &value) < 0)
			return ti_error_einval(rs->ti, "Bad numerical argument given in raid params");

		if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!_in_range(value, 0, rs->raid_disks - 1))
				return ti_error_einval(rs->ti, "Invalid rebuild index given");

			if (test_and_set_bit(value, (void *) rs->rebuild_disks))
				return ti_error_einval(rs->ti, "rebuild for this index already given");

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			_set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt))
				return ti_error_einval(rs->ti, "write_mostly option is only valid for RAID1");

			if (!_in_range(value, 0, rs->md.raid_disks - 1))
				return ti_error_einval(rs->ti, "Invalid write_mostly index given");

			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			_set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt))
				return ti_error_einval(rs->ti, "max_write_behind option is only valid for RAID1");

			if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one max_write_behind argument pair allowed");

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX)
				return ti_error_einval(rs->ti, "Max write-behind limit out of range");

			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one daemon_sleep argument pair allowed");
			if (!value || (value > MAX_SCHEDULE_TIMEOUT))
				return ti_error_einval(rs->ti, "daemon sleep period out of range");
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (_test_and_set_flag(CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one data_offset argument pair allowed");

			/* Ensure sensible data offset */
			if (value < 0)
				return ti_error_einval(rs->ti, "Bogus data_offset value");

			rs->data_offset = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (_test_and_set_flag(CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one delta_disks argument pair allowed");

			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
			if (!_in_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs))
				return ti_error_einval(rs->ti, "Too many delta_disk requested");

			rs->delta_disks = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one stripe_cache argument pair allowed");
			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if (!rt_is_raid456(rt))
				return ti_error_einval(rs->ti, "Inappropriate argument: stripe_cache");
			if (raid5_set_cache_size(&rs->md, (int)value))
				return ti_error_einval(rs->ti, "Bad stripe_cache size");

		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one min_recovery_rate argument pair allowed");
			if (value > INT_MAX)
				return ti_error_einval(rs->ti, "min_recovery_rate out of range");
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one max_recovery_rate argument pair allowed");
			if (value > INT_MAX)
				return ti_error_einval(rs->ti, "max_recovery_rate out of range");
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one region_size argument pair allowed");

			region_size = value;
		} else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
				return ti_error_einval(rs->ti, "Only one raid10_copies argument pair allowed");

			if (!_in_range(value, 2, rs->md.raid_disks))
				return ti_error_einval(rs->ti, "Bad value for 'raid10_copies'");

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			return ti_error_einval(rs->ti, "Unable to parse RAID parameters");
		}
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks)
			return ti_error_einval(rs->ti, "Not enough devices to satisfy specification");

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0)
			return ti_error_ret(rs->ti, "Error getting raid10 format", rs->md.new_layout);

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt)
			return ti_error_einval(rs->ti, "Failed to recognize new raid10 layout");

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    _test_flag(CTR_FLAG_RAID10_USE_NEAR_SETS, rs->ctr_flags))
			return ti_error_einval(rs->ti, "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible");

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if (!rt_is_raid1(rt) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rt->parity_devs)))
		return ti_error_einval(rs->ti, "Target length not divisible by number of data devices");

	rs->raid10_copies = raid10_copies;
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_invalid_flags(rs);
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	return mddev_congested(&rs->md, bits);
}

/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 *
 * Degradation is already checked for in rs_check_conversion() below.
 */
1270 | static int rs_check_takeover(struct raid_set *rs) | |
1271 | { | |
1272 | struct mddev *mddev = &rs->md; | |
1273 | unsigned int near_copies; | |
1274 | ||
1275 | switch (mddev->level) { | |
1276 | case 0: | |
1277 | /* raid0 -> raid1/5 with one disk */ | |
1278 | if ((mddev->new_level == 1 || mddev->new_level == 5) && | |
1279 | mddev->raid_disks == 1) | |
1280 | return 0; | |
1281 | ||
1282 | /* raid0 -> raid10 */ | |
1283 | if (mddev->new_level == 10 && | |
1284 | !(rs->raid_disks % 2)) | |
1285 | return 0; | |
1286 | ||
1287 | /* raid0 with multiple disks -> raid4/5/6 */ | |
1288 | if (_in_range(mddev->new_level, 4, 6) && | |
1289 | mddev->new_layout == ALGORITHM_PARITY_N && | |
1290 | mddev->raid_disks > 1) | |
1291 | return 0; | |
1292 | ||
1293 | break; | |
1294 | ||
1295 | case 10: | |
1296 | /* Can't takeover raid10_offset! */ | |
1297 | if (_is_raid10_offset(mddev->layout)) | |
1298 | break; | |
1299 | ||
1300 | near_copies = _raid10_near_copies(mddev->layout); | |
1301 | ||
1302 | /* raid10* -> raid0 */ | |
1303 | if (mddev->new_level == 0) { | |
1304 | /* Can takeover raid10_near with raid disks divisable by data copies! */ | |
1305 | if (near_copies > 1 && | |
1306 | !(mddev->raid_disks % near_copies)) { | |
1307 | mddev->raid_disks /= near_copies; | |
1308 | mddev->delta_disks = mddev->raid_disks; | |
1309 | return 0; | |
1310 | } | |
1311 | ||
1312 | /* Can takeover raid10_far */ | |
1313 | if (near_copies == 1 && | |
1314 | _raid10_far_copies(mddev->layout) > 1) | |
1315 | return 0; | |
1316 | ||
1317 | break; | |
1318 | } | |
1319 | ||
1320 | /* raid10_{near,far} -> raid1 */ | |
1321 | if (mddev->new_level == 1 && | |
1322 | max(near_copies, _raid10_far_copies(mddev->layout)) == mddev->raid_disks) | |
1323 | return 0; | |
1324 | ||
1325 | /* raid10_{near,far} with 2 disks -> raid4/5 */ | |
1326 | if (_in_range(mddev->new_level, 4, 5) && | |
1327 | mddev->raid_disks == 2) | |
1328 | return 0; | |
1329 | break; | |
1330 | ||
1331 | case 1: | |
1332 | /* raid1 with 2 disks -> raid4/5 */ | |
1333 | if (_in_range(mddev->new_level, 4, 5) && | |
1334 | mddev->raid_disks == 2) { | |
1335 | mddev->degraded = 1; | |
1336 | return 0; | |
1337 | } | |
1338 | ||
1339 | /* raid1 -> raid0 */ | |
1340 | if (mddev->new_level == 0 && | |
1341 | mddev->raid_disks == 1) | |
1342 | return 0; | |
1343 | ||
1344 | /* raid1 -> raid10 */ | |
1345 | if (mddev->new_level == 10) | |
1346 | return 0; | |
1347 | ||
1348 | break; | |
1349 | ||
1350 | case 4: | |
1351 | /* raid4 -> raid0 */ | |
1352 | if (mddev->new_level == 0) | |
1353 | return 0; | |
1354 | ||
1355 | /* raid4 -> raid1/5 with 2 disks */ | |
1356 | if ((mddev->new_level == 1 || mddev->new_level == 5) && | |
1357 | mddev->raid_disks == 2) | |
1358 | return 0; | |
1359 | ||
1360 | /* raid4 -> raid5/6 with parity N */ | |
1361 | if (_in_range(mddev->new_level, 5, 6) && | |
1362 | mddev->layout == ALGORITHM_PARITY_N) | |
1363 | return 0; | |
1364 | break; | |
1365 | ||
1366 | case 5: | |
1367 | /* raid5 with parity N -> raid0 */ | |
1368 | if (mddev->new_level == 0 && | |
1369 | mddev->layout == ALGORITHM_PARITY_N) | |
1370 | return 0; | |
1371 | ||
1372 | /* raid5 with parity N -> raid4 */ | |
1373 | if (mddev->new_level == 4 && | |
1374 | mddev->layout == ALGORITHM_PARITY_N) | |
1375 | return 0; | |
1376 | ||
1377 | /* raid5 with 2 disks -> raid1/4/10 */ | |
1378 | if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) && | |
1379 | mddev->raid_disks == 2) | |
1380 | return 0; | |
1381 | ||
1382 | /* raid5 with parity N -> raid6 with parity N */ | |
1383 | if (mddev->new_level == 6 && | |
1384 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || | |
1385 | _in_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6))) | |
1386 | return 0; | |
1387 | break; | |
1388 | ||
1389 | case 6: | |
1390 | /* raid6 with parity N -> raid0 */ | |
1391 | if (mddev->new_level == 0 && | |
1392 | mddev->layout == ALGORITHM_PARITY_N) | |
1393 | return 0; | |
1394 | ||
1395 | /* raid6 with parity N -> raid4 */ | |
1396 | if (mddev->new_level == 4 && | |
1397 | mddev->layout == ALGORITHM_PARITY_N) | |
1398 | return 0; | |
1399 | ||
1400 | /* raid6_*_n with parity N -> raid5_* */ | |
1401 | if (mddev->new_level == 5 && | |
1402 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || | |
1403 | _in_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) | |
1404 | return 0; | |
1405 | ||
1406 | default: | |
1407 | break; | |
1408 | } | |
1409 | ||
1410 | return ti_error_einval(rs->ti, "takeover not possible"); | |
1411 | } | |
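/*
 * Worked example (added for illustration, not part of the original source):
 * for an existing two-device raid1 set, reloading the table with raid_type
 * "raid5" leaves mddev->level at 1 (read back from the superblocks) while
 * mddev->new_level becomes 5 (from the new table line).
 * rs_takeover_requested() below then returns true and rs_check_takeover()
 * accepts the conversion via the "raid1 with 2 disks -> raid4/5" rule above.
 */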
1412 | ||
1413 | /* True if @rs requested to be taken over */ | |
1414 | static bool rs_takeover_requested(struct raid_set *rs) | |
1415 | { | |
1416 | return rs->md.new_level != rs->md.level; | |
1417 | } | |
1418 | ||
33e53f06 | 1419 | /* Features */ |
ecbfb9f1 HM |
1420 | #define FEATURE_FLAG_SUPPORTS_V180 0x1 /* Supports v1.8.0 extended superblock */ |
1421 | #define FEATURE_FLAG_SUPPORTS_RESHAPE 0x2 /* Supports v1.8.0 reshaping functionality */ | |
33e53f06 HM |
1422 | |
1423 | /* State flags for sb->flags */ | |
1424 | #define SB_FLAG_RESHAPE_ACTIVE 0x1 | |
1425 | #define SB_FLAG_RESHAPE_BACKWARDS 0x2 | |
1426 | ||
b12d437b JB |
1427 | /* |
1428 | * This structure is never routinely used by userspace, unlike md superblocks. | |
1429 | * Devices with this superblock should only ever be accessed via device-mapper. | |
1430 | */ | |
1431 | #define DM_RAID_MAGIC 0x64526D44 | |
1432 | struct dm_raid_superblock { | |
1433 | __le32 magic; /* "DmRd" */ | |
33e53f06 | 1434 | __le32 compat_features; /* Used to indicate compatible features (like 1.8.0 ondisk metadata extension) */ |
b12d437b | 1435 | |
33e53f06 HM |
1436 | __le32 num_devices; /* Number of devices in this raid set. (Max 64) */ |
1437 | __le32 array_position; /* The position of this drive in the raid set */ | |
b12d437b JB |
1438 | |
1439 | __le64 events; /* Incremented by md when superblock updated */ | |
33e53f06 HM |
1440 | __le64 failed_devices; /* Pre 1.8.0 part of bit field of devices to */ |
1441 | /* indicate failures (see extension below) */ | |
b12d437b JB |
1442 | |
1443 | /* | |
1444 | * This offset tracks the progress of the repair or replacement of | |
1445 | * an individual drive. | |
1446 | */ | |
1447 | __le64 disk_recovery_offset; | |
1448 | ||
1449 | /* | |
33e53f06 | 1450 | * This offset tracks the progress of the initial raid set |
b12d437b JB |
1451 | * synchronisation/parity calculation. |
1452 | */ | |
1453 | __le64 array_resync_offset; | |
1454 | ||
1455 | /* | |
33e53f06 | 1456 | * raid characteristics |
b12d437b JB |
1457 | */ |
1458 | __le32 level; | |
1459 | __le32 layout; | |
1460 | __le32 stripe_sectors; | |
1461 | ||
33e53f06 HM |
1462 | /******************************************************************** |
1463 | * BELOW FOLLOW V1.8.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! | |
1464 | * | |
ecbfb9f1 | 1465 | * FEATURE_FLAG_SUPPORTS_V180 in the compat_features member indicates that these exist |
33e53f06 HM |
1466 | */ |
1467 | ||
1468 | __le32 flags; /* Flags defining array states for reshaping */ | |
1469 | ||
1470 | /* | |
1471 | * This offset tracks the progress of a raid | |
1472 | * set reshape in order to be able to restart it | |
1473 | */ | |
1474 | __le64 reshape_position; | |
1475 | ||
1476 | /* | |
1477 | * These define the properties of the array in case of an interrupted reshape | |
1478 | */ | |
1479 | __le32 new_level; | |
1480 | __le32 new_layout; | |
1481 | __le32 new_stripe_sectors; | |
1482 | __le32 delta_disks; | |
1483 | ||
1484 | __le64 array_sectors; /* Array size in sectors */ | |
1485 | ||
1486 | /* | |
1487 | * Sector offsets to data on devices (reshaping). | |
1488 | * Needed to support out of place reshaping, thus | |
1489 | * not writing over any stripes whilst converting | |
1490 | * them from old to new layout | |
1491 | */ | |
1492 | __le64 data_offset; | |
1493 | __le64 new_data_offset; | |
1494 | ||
1495 | __le64 sectors; /* Used device size in sectors */ | |
1496 | ||
1497 | /* | |
1498 | * Additional bit field of devices indicating failures to support | |
1499 | * up to 256 devices with the 1.8.0 on-disk metadata format | |
1500 | */ | |
1501 | __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1]; | |
1502 | ||
1503 | __le32 incompat_features; /* Used to indicate any incompatible features */ | |
1504 | ||
1505 | /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */ | |
b12d437b JB |
1506 | } __packed; |
1507 | ||
3cb03002 | 1508 | static int read_disk_sb(struct md_rdev *rdev, int size) |
b12d437b JB |
1509 | { |
1510 | BUG_ON(!rdev->sb_page); | |
1511 | ||
1512 | if (rdev->sb_loaded) | |
1513 | return 0; | |
1514 | ||
796a5cf0 | 1515 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) { |
0447568f JB |
1516 | DMERR("Failed to read superblock of device at position %d", |
1517 | rdev->raid_disk); | |
c32fb9e7 | 1518 | md_error(rdev->mddev, rdev); |
b12d437b JB |
1519 | return -EINVAL; |
1520 | } | |
1521 | ||
1522 | rdev->sb_loaded = 1; | |
1523 | ||
1524 | return 0; | |
1525 | } | |
1526 | ||
33e53f06 HM |
1527 | static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
1528 | { | |
1529 | failed_devices[0] = le64_to_cpu(sb->failed_devices); | |
1530 | memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices)); | |
1531 | ||
ecbfb9f1 | 1532 | if (_test_flag(FEATURE_FLAG_SUPPORTS_V180, le32_to_cpu(sb->compat_features))) { |
33e53f06 HM |
1533 | int i = ARRAY_SIZE(sb->extended_failed_devices); |
1534 | ||
1535 | while (i--) | |
1536 | failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]); | |
1537 | } | |
1538 | } | |
1539 | ||
7b34df74 HM |
1540 | static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
1541 | { | |
1542 | int i = ARRAY_SIZE(sb->extended_failed_devices); | |
1543 | ||
1544 | sb->failed_devices = cpu_to_le64(failed_devices[0]); | |
1545 | while (i--) | |
1546 | sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]); | |
1547 | } | |
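/*
 * Note (added for clarity, not in the original source): the failed-device
 * bit field is split for compatibility.  Word 0 (devices 0..63) is kept in
 * sb->failed_devices as in the pre-1.8.0 format; the remaining
 * DISKS_ARRAY_ELEMS - 1 words live in sb->extended_failed_devices[], which
 * is what allows up to 256 devices with the 1.8.0 on-disk metadata.
 */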
1548 | ||
1549 | /* | |
1550 | * Synchronize the superblock members with the raid set properties | |
1551 | * | |
1552 | * All superblock data is little endian. | |
1553 | */ | |
fd01b88c | 1554 | static void super_sync(struct mddev *mddev, struct md_rdev *rdev) |
b12d437b | 1555 | { |
7b34df74 HM |
1556 | bool update_failed_devices = false; |
1557 | unsigned int i; | |
1558 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; | |
b12d437b | 1559 | struct dm_raid_superblock *sb; |
81f382f9 | 1560 | struct raid_set *rs = container_of(mddev, struct raid_set, md); |
b12d437b | 1561 | |
7b34df74 HM |
1562 | /* No metadata device, no superblock */ |
1563 | if (!rdev->meta_bdev) | |
1564 | return; | |
1565 | ||
1566 | BUG_ON(!rdev->sb_page); | |
1567 | ||
b12d437b | 1568 | sb = page_address(rdev->sb_page); |
b12d437b | 1569 | |
7b34df74 | 1570 | sb_retrieve_failed_devices(sb, failed_devices); |
b12d437b | 1571 | |
7b34df74 HM |
1572 | for (i = 0; i < rs->raid_disks; i++) |
1573 | if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { | |
1574 | update_failed_devices = true; | |
1575 | set_bit(i, (void *) failed_devices); | |
1576 | } | |
1577 | ||
1578 | if (update_failed_devices) | |
1579 | sb_update_failed_devices(sb, failed_devices); | |
b12d437b JB |
1580 | |
1581 | sb->magic = cpu_to_le32(DM_RAID_MAGIC); | |
ecbfb9f1 | 1582 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V180); /* Don't set reshape flag yet */ |
b12d437b JB |
1583 | |
1584 | sb->num_devices = cpu_to_le32(mddev->raid_disks); | |
1585 | sb->array_position = cpu_to_le32(rdev->raid_disk); | |
1586 | ||
1587 | sb->events = cpu_to_le64(mddev->events); | |
b12d437b JB |
1588 | |
1589 | sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); | |
1590 | sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); | |
1591 | ||
1592 | sb->level = cpu_to_le32(mddev->level); | |
1593 | sb->layout = cpu_to_le32(mddev->layout); | |
1594 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); | |
7b34df74 HM |
1595 | |
1596 | sb->new_level = cpu_to_le32(mddev->new_level); | |
1597 | sb->new_layout = cpu_to_le32(mddev->new_layout); | |
1598 | sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); | |
1599 | ||
1600 | sb->delta_disks = cpu_to_le32(mddev->delta_disks); | |
1601 | ||
1602 | smp_rmb(); /* Make sure we access most recent reshape position */ | |
1603 | sb->reshape_position = cpu_to_le64(mddev->reshape_position); | |
1604 | if (le64_to_cpu(sb->reshape_position) != MaxSector) { | |
1605 | /* Flag ongoing reshape */ | |
1606 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE); | |
1607 | ||
1608 | if (mddev->delta_disks < 0 || mddev->reshape_backwards) | |
1609 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS); | |
1610 | } else | |
1611 | /* Flag no reshape */ | |
1612 | _clear_flags(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS), &sb->flags); | |
1613 | ||
1614 | sb->array_sectors = cpu_to_le64(mddev->array_sectors); | |
1615 | sb->data_offset = cpu_to_le64(rdev->data_offset); | |
1616 | sb->new_data_offset = cpu_to_le64(rdev->new_data_offset); | |
1617 | sb->sectors = cpu_to_le64(rdev->sectors); | |
1618 | ||
1619 | /* Zero out the rest of the payload after the size of the superblock */ | |
1620 | memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); | |
b12d437b JB |
1621 | } |
1622 | ||
1623 | /* | |
1624 | * super_load | |
1625 | * | |
1626 | * This function creates a superblock if one is not found on the device | |
1627 | * and will decide which superblock to use if there's a choice. | |
1628 | * | |
1629 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise | |
1630 | */ | |
3cb03002 | 1631 | static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
b12d437b | 1632 | { |
73c6f239 | 1633 | int r; |
b12d437b JB |
1634 | struct dm_raid_superblock *sb; |
1635 | struct dm_raid_superblock *refsb; | |
1636 | uint64_t events_sb, events_refsb; | |
1637 | ||
1638 | rdev->sb_start = 0; | |
40d43c4b HM |
1639 | rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
1640 | if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { | |
1641 | DMERR("superblock size of a logical block is no longer valid"); | |
1642 | return -EINVAL; | |
1643 | } | |
b12d437b | 1644 | |
73c6f239 HM |
1645 | r = read_disk_sb(rdev, rdev->sb_size); |
1646 | if (r) | |
1647 | return r; | |
b12d437b JB |
1648 | |
1649 | sb = page_address(rdev->sb_page); | |
3aa3b2b2 JB |
1650 | |
1651 | /* | |
1652 | * Two cases that we want to write new superblocks and rebuild: | |
1653 | * 1) New device (no matching magic number) | |
1654 | * 2) Device specified for rebuild (!In_sync w/ offset == 0) | |
1655 | */ | |
1656 | if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) || | |
1657 | (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { | |
b12d437b JB |
1658 | super_sync(rdev->mddev, rdev); |
1659 | ||
1660 | set_bit(FirstUse, &rdev->flags); | |
ecbfb9f1 | 1661 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V180); /* Don't set reshape flag yet */ |
b12d437b JB |
1662 | |
1663 | /* Force writing of superblocks to disk */ | |
1664 | set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); | |
1665 | ||
1666 | /* Any superblock is better than none, choose that if given */ | |
1667 | return refdev ? 0 : 1; | |
1668 | } | |
1669 | ||
1670 | if (!refdev) | |
1671 | return 1; | |
1672 | ||
1673 | events_sb = le64_to_cpu(sb->events); | |
1674 | ||
1675 | refsb = page_address(refdev->sb_page); | |
1676 | events_refsb = le64_to_cpu(refsb->events); | |
1677 | ||
1678 | return (events_sb > events_refsb) ? 1 : 0; | |
1679 | } | |
1680 | ||
33e53f06 | 1681 | static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) |
b12d437b JB |
1682 | { |
1683 | int role; | |
33e53f06 HM |
1684 | unsigned int d; |
1685 | struct mddev *mddev = &rs->md; | |
b12d437b | 1686 | uint64_t events_sb; |
33e53f06 | 1687 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; |
b12d437b | 1688 | struct dm_raid_superblock *sb; |
33e53f06 | 1689 | uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0; |
dafb20fa | 1690 | struct md_rdev *r; |
b12d437b JB |
1691 | struct dm_raid_superblock *sb2; |
1692 | ||
1693 | sb = page_address(rdev->sb_page); | |
1694 | events_sb = le64_to_cpu(sb->events); | |
b12d437b JB |
1695 | |
1696 | /* | |
1697 | * Initialise to 1 if this is a new superblock. | |
1698 | */ | |
1699 | mddev->events = events_sb ? : 1; | |
1700 | ||
33e53f06 HM |
1701 | mddev->reshape_position = MaxSector; |
1702 | ||
b12d437b | 1703 | /* |
33e53f06 HM |
1704 | * Reshaping is supported, i.e. reshape_position is valid |
1705 | * in the superblock and the superblock content is authoritative. |
b12d437b | 1706 | */ |
ecbfb9f1 | 1707 | if (_test_flag(FEATURE_FLAG_SUPPORTS_V180, le32_to_cpu(sb->compat_features))) { |
33e53f06 HM |
1708 | /* Superblock is authoritative wrt given raid set layout! */ |
1709 | mddev->raid_disks = le32_to_cpu(sb->num_devices); | |
1710 | mddev->level = le32_to_cpu(sb->level); | |
1711 | mddev->layout = le32_to_cpu(sb->layout); | |
1712 | mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); | |
1713 | mddev->new_level = le32_to_cpu(sb->new_level); | |
1714 | mddev->new_layout = le32_to_cpu(sb->new_layout); | |
1715 | mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); | |
1716 | mddev->delta_disks = le32_to_cpu(sb->delta_disks); | |
1717 | mddev->array_sectors = le64_to_cpu(sb->array_sectors); | |
1718 | ||
1719 | /* raid was reshaping and got interrupted */ | |
1720 | if (_test_flag(SB_FLAG_RESHAPE_ACTIVE, le32_to_cpu(sb->flags))) { | |
1721 | if (_test_flag(CTR_FLAG_DELTA_DISKS, rs->ctr_flags)) { | |
1722 | DMERR("Reshape requested but raid set is still reshaping"); | |
1723 | return -EINVAL; | |
1724 | } | |
b12d437b | 1725 | |
33e53f06 HM |
1726 | if (mddev->delta_disks < 0 || |
1727 | (!mddev->delta_disks && _test_flag(SB_FLAG_RESHAPE_BACKWARDS, le32_to_cpu(sb->flags)))) | |
1728 | mddev->reshape_backwards = 1; | |
1729 | else | |
1730 | mddev->reshape_backwards = 0; | |
1731 | ||
1732 | mddev->reshape_position = le64_to_cpu(sb->reshape_position); | |
1733 | rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); | |
1734 | } | |
1735 | ||
1736 | } else { | |
1737 | /* | |
1738 | * Reshaping is not allowed, because we don't have the appropriate metadata | |
1739 | */ | |
1740 | if (le32_to_cpu(sb->level) != mddev->level) { | |
1741 | DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); | |
1742 | return -EINVAL; | |
1743 | } | |
1744 | if (le32_to_cpu(sb->layout) != mddev->layout) { | |
1745 | DMERR("Reshaping raid sets not yet supported. (raid layout change)"); | |
1746 | DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); | |
1747 | DMERR(" Old layout: %s w/ %d copies", | |
1748 | raid10_md_layout_to_format(le32_to_cpu(sb->layout)), | |
1749 | raid10_md_layout_to_copies(le32_to_cpu(sb->layout))); | |
1750 | DMERR(" New layout: %s w/ %d copies", | |
1751 | raid10_md_layout_to_format(mddev->layout), | |
1752 | raid10_md_layout_to_copies(mddev->layout)); | |
1753 | return -EINVAL; | |
1754 | } | |
1755 | if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { | |
1756 | DMERR("Reshaping raid sets not yet supported. (stripe sectors change)"); | |
1757 | return -EINVAL; | |
1758 | } | |
1759 | ||
1760 | /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */ | |
1761 | if (!rt_is_raid1(rs->raid_type) && | |
1762 | (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { | |
1763 | DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)", | |
1764 | le32_to_cpu(sb->num_devices), mddev->raid_disks); | |
1765 | return -EINVAL; | |
1766 | } | |
1767 | ||
1768 | /* Table line is checked vs. authoritative superblock */ | |
1769 | rs_set_new(rs); | |
b12d437b JB |
1770 | } |
1771 | ||
33e53f06 | 1772 | if (!_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags)) |
b12d437b JB |
1773 | mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); |
1774 | ||
1775 | /* | |
1776 | * During load, we set FirstUse if a new superblock was written. | |
1777 | * There are two reasons we might not have a superblock: | |
33e53f06 | 1778 | * 1) The raid set is brand new - in which case, all of the |
b12d437b JB |
1779 | * devices must have their In_sync bit set. Also, |
1780 | * recovery_cp must be 0, unless forced. | |
33e53f06 | 1781 | * 2) This is a new device being added to an old raid set |
b12d437b JB |
1782 | * and the new device needs to be rebuilt - in which |
1783 | * case the In_sync bit will /not/ be set and | |
1784 | * recovery_cp must be MaxSector. | |
1785 | */ | |
33e53f06 | 1786 | d = 0; |
dafb20fa | 1787 | rdev_for_each(r, mddev) { |
33e53f06 HM |
1788 | if (test_bit(FirstUse, &r->flags)) |
1789 | new_devs++; | |
1790 | ||
b12d437b | 1791 | if (!test_bit(In_sync, &r->flags)) { |
33e53f06 HM |
1792 | DMINFO("Device %d specified for rebuild; clearing superblock", |
1793 | r->raid_disk); | |
b12d437b | 1794 | rebuilds++; |
33e53f06 HM |
1795 | |
1796 | if (test_bit(FirstUse, &r->flags)) | |
1797 | rebuild_and_new++; | |
1798 | } | |
1799 | ||
1800 | d++; | |
b12d437b JB |
1801 | } |
1802 | ||
33e53f06 HM |
1803 | if (new_devs == rs->raid_disks || !rebuilds) { |
1804 | /* Replace a broken device */ | |
1805 | if (new_devs == 1 && !rs->delta_disks) | |
1806 | ; | |
1807 | if (new_devs == rs->raid_disks) { | |
1808 | DMINFO("Superblocks created for new raid set"); | |
b12d437b | 1809 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); |
ecbfb9f1 | 1810 | _set_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
33e53f06 HM |
1811 | mddev->recovery_cp = 0; |
1812 | } else if (new_devs && new_devs != rs->raid_disks && !rebuilds) { | |
1813 | DMERR("New device injected into existing raid set without " | |
1814 | "'delta_disks' or 'rebuild' parameter specified"); | |
b12d437b JB |
1815 | return -EINVAL; |
1816 | } | |
33e53f06 HM |
1817 | } else if (new_devs && new_devs != rebuilds) { |
1818 | DMERR("%u 'rebuild' devices cannot be injected into" | |
1819 | " a raid set with %u other first-time devices", | |
1820 | rebuilds, new_devs); | |
b12d437b | 1821 | return -EINVAL; |
33e53f06 HM |
1822 | } else if (rebuilds) { |
1823 | if (rebuild_and_new && rebuilds != rebuild_and_new) { | |
1824 | DMERR("new device%s provided without 'rebuild'", | |
1825 | new_devs > 1 ? "s" : ""); | |
1826 | return -EINVAL; | |
1827 | } else if (mddev->recovery_cp != MaxSector) { | |
1828 | DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)", | |
1829 | (unsigned long long) mddev->recovery_cp); | |
1830 | return -EINVAL; | |
1831 | } else if (mddev->reshape_position != MaxSector) { | |
1832 | DMERR("'rebuild' specified while raid set is being reshaped"); | |
1833 | return -EINVAL; | |
1834 | } | |
b12d437b JB |
1835 | } |
1836 | ||
1837 | /* | |
1838 | * Now we set the Faulty bit for those devices that are | |
1839 | * recorded in the superblock as failed. | |
1840 | */ | |
33e53f06 | 1841 | sb_retrieve_failed_devices(sb, failed_devices); |
dafb20fa | 1842 | rdev_for_each(r, mddev) { |
b12d437b JB |
1843 | if (!r->sb_page) |
1844 | continue; | |
1845 | sb2 = page_address(r->sb_page); | |
1846 | sb2->failed_devices = 0; | |
33e53f06 | 1847 | memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices)); |
b12d437b JB |
1848 | |
1849 | /* | |
1850 | * Check for any device re-ordering. | |
1851 | */ | |
1852 | if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { | |
1853 | role = le32_to_cpu(sb2->array_position); | |
33e53f06 HM |
1854 | if (role < 0) |
1855 | continue; | |
1856 | ||
b12d437b | 1857 | if (role != r->raid_disk) { |
33e53f06 HM |
1858 | if (_is_raid10_near(mddev->layout)) { |
1859 | if (mddev->raid_disks % _raid10_near_copies(mddev->layout) || | |
1860 | rs->raid_disks % rs->raid10_copies) | |
1861 | return ti_error_einval(rs->ti, "Cannot change raid10 near " | |
1862 | "set to odd # of devices!"); | |
1863 | ||
1864 | sb2->array_position = cpu_to_le32(r->raid_disk); | |
1865 | ||
1866 | } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && | |
1867 | !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && | |
1868 | !rt_is_raid1(rs->raid_type)) | |
1869 | return ti_error_einval(rs->ti, "Cannot change device positions in raid set"); | |
1870 | ||
1871 | DMINFO("raid device #%d now at position #%d", | |
b12d437b JB |
1872 | role, r->raid_disk); |
1873 | } | |
1874 | ||
1875 | /* | |
1876 | * Partial recovery is performed on | |
1877 | * returning failed devices. | |
1878 | */ | |
33e53f06 | 1879 | if (test_bit(role, (void *) failed_devices)) |
b12d437b JB |
1880 | set_bit(Faulty, &r->flags); |
1881 | } | |
1882 | } | |
1883 | ||
1884 | return 0; | |
1885 | } | |
1886 | ||
0cf45031 | 1887 | static int super_validate(struct raid_set *rs, struct md_rdev *rdev) |
b12d437b | 1888 | { |
0cf45031 | 1889 | struct mddev *mddev = &rs->md; |
33e53f06 HM |
1890 | struct dm_raid_superblock *sb; |
1891 | ||
1892 | if (!rdev->sb_page) | |
1893 | return 0; | |
1894 | ||
1895 | sb = page_address(rdev->sb_page); | |
b12d437b JB |
1896 | |
1897 | /* | |
1898 | * If mddev->events is not set, we know we have not yet initialized | |
1899 | * the array. | |
1900 | */ | |
33e53f06 | 1901 | if (!mddev->events && super_init_validation(rs, rdev)) |
b12d437b JB |
1902 | return -EINVAL; |
1903 | ||
ecbfb9f1 HM |
1904 | if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V180 || |
1905 | sb->incompat_features) { | |
1906 | rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; | |
4c9971ca HM |
1907 | return -EINVAL; |
1908 | } | |
1909 | ||
0cf45031 | 1910 | /* Enable bitmap creation for RAID levels != 0 */ |
676fa5ad | 1911 | mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); |
0cf45031 HM |
1912 | rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; |
1913 | ||
33e53f06 HM |
1914 | if (!test_and_clear_bit(FirstUse, &rdev->flags)) { |
1915 | /* Retrieve device size stored in superblock to be prepared for shrink */ | |
1916 | rdev->sectors = le64_to_cpu(sb->sectors); | |
b12d437b | 1917 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); |
33e53f06 HM |
1918 | if (rdev->recovery_offset == MaxSector) |
1919 | set_bit(In_sync, &rdev->flags); | |
1920 | /* | |
1921 | * If no reshape in progress -> we're recovering single | |
1922 | * disk(s) and have to set the device(s) to out-of-sync | |
1923 | */ | |
1924 | else if (rs->md.reshape_position == MaxSector) | |
1925 | clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */ | |
b12d437b JB |
1926 | } |
1927 | ||
1928 | /* | |
1929 | * If a device comes back, set it as not In_sync and no longer faulty. | |
1930 | */ | |
33e53f06 HM |
1931 | if (test_and_clear_bit(Faulty, &rdev->flags)) { |
1932 | rdev->recovery_offset = 0; | |
b12d437b JB |
1933 | clear_bit(In_sync, &rdev->flags); |
1934 | rdev->saved_raid_disk = rdev->raid_disk; | |
b12d437b JB |
1935 | } |
1936 | ||
33e53f06 HM |
1937 | /* Reshape support -> restore respective data offsets */ |
1938 | rdev->data_offset = le64_to_cpu(sb->data_offset); | |
1939 | rdev->new_data_offset = le64_to_cpu(sb->new_data_offset); | |
b12d437b JB |
1940 | |
1941 | return 0; | |
1942 | } | |
1943 | ||
1944 | /* | |
1945 | * Analyse superblocks and select the freshest. | |
1946 | */ | |
1947 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |
1948 | { | |
73c6f239 | 1949 | int r; |
0447568f | 1950 | struct raid_dev *dev; |
a9ad8526 | 1951 | struct md_rdev *rdev, *tmp, *freshest; |
fd01b88c | 1952 | struct mddev *mddev = &rs->md; |
b12d437b JB |
1953 | |
1954 | freshest = NULL; | |
a9ad8526 | 1955 | rdev_for_each_safe(rdev, tmp, mddev) { |
761becff | 1956 | /* |
c76d53f4 | 1957 | * Skipping super_load due to CTR_FLAG_SYNC will cause |
761becff JB |
1958 | * the array to undergo initialization again as |
1959 | * though it were new. This is the intended effect | |
1960 | * of the "sync" directive. | |
1961 | * | |
1962 | * When reshaping capability is added, we must ensure | |
1963 | * that the "sync" directive is disallowed during the | |
1964 | * reshape. | |
1965 | */ | |
ad51d7f1 | 1966 | if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags)) |
761becff JB |
1967 | continue; |
1968 | ||
b12d437b JB |
1969 | if (!rdev->meta_bdev) |
1970 | continue; | |
1971 | ||
73c6f239 | 1972 | r = super_load(rdev, freshest); |
b12d437b | 1973 | |
73c6f239 | 1974 | switch (r) { |
b12d437b JB |
1975 | case 1: |
1976 | freshest = rdev; | |
1977 | break; | |
1978 | case 0: | |
1979 | break; | |
1980 | default: | |
0447568f | 1981 | dev = container_of(rdev, struct raid_dev, rdev); |
55ebbb59 JB |
1982 | if (dev->meta_dev) |
1983 | dm_put_device(ti, dev->meta_dev); | |
0447568f | 1984 | |
55ebbb59 JB |
1985 | dev->meta_dev = NULL; |
1986 | rdev->meta_bdev = NULL; | |
0447568f | 1987 | |
55ebbb59 JB |
1988 | if (rdev->sb_page) |
1989 | put_page(rdev->sb_page); | |
0447568f | 1990 | |
55ebbb59 | 1991 | rdev->sb_page = NULL; |
0447568f | 1992 | |
55ebbb59 | 1993 | rdev->sb_loaded = 0; |
0447568f | 1994 | |
55ebbb59 JB |
1995 | /* |
1996 | * We might be able to salvage the data device | |
1997 | * even though the meta device has failed. For | |
1998 | * now, we behave as though '- -' had been | |
1999 | * set for this device in the table. | |
2000 | */ | |
2001 | if (dev->data_dev) | |
2002 | dm_put_device(ti, dev->data_dev); | |
0447568f | 2003 | |
55ebbb59 JB |
2004 | dev->data_dev = NULL; |
2005 | rdev->bdev = NULL; | |
0447568f | 2006 | |
55ebbb59 | 2007 | list_del(&rdev->same_set); |
b12d437b JB |
2008 | } |
2009 | } | |
2010 | ||
2011 | if (!freshest) | |
2012 | return 0; | |
2013 | ||
702108d1 HM |
2014 | if (validate_raid_redundancy(rs)) |
2015 | return ti_error_einval(rs->ti, "Insufficient redundancy to activate array"); | |
55ebbb59 | 2016 | |
b12d437b JB |
2017 | /* |
2018 | * Validation of the freshest device provides the source of | |
2019 | * validation for the remaining devices. | |
2020 | */ | |
0cf45031 | 2021 | if (super_validate(rs, freshest)) |
702108d1 | 2022 | return ti_error_einval(rs->ti, "Unable to assemble array: Invalid superblocks"); |
b12d437b | 2023 | |
dafb20fa | 2024 | rdev_for_each(rdev, mddev) |
0cf45031 | 2025 | if ((rdev != freshest) && super_validate(rs, rdev)) |
b12d437b JB |
2026 | return -EINVAL; |
2027 | ||
2028 | return 0; | |
2029 | } | |
2030 | ||
ecbfb9f1 HM |
2031 | /* Userspace reordered disks -> adjust raid_disk indexes in @rs */ | |
2032 | static void _reorder_raid_disk_indexes(struct raid_set *rs) | |
2033 | { | |
2034 | int i = 0; | |
2035 | struct md_rdev *rdev; | |
2036 | ||
2037 | rdev_for_each(rdev, &rs->md) { | |
2038 | rdev->raid_disk = i++; | |
2039 | rdev->saved_raid_disk = rdev->new_raid_disk = -1; | |
2040 | } | |
2041 | } | |
2042 | ||
2043 | /* | |
2044 | * Setup @rs for takeover by a different raid level | |
2045 | */ | |
2046 | static int rs_setup_takeover(struct raid_set *rs) | |
2047 | { | |
2048 | struct mddev *mddev = &rs->md; | |
2049 | struct md_rdev *rdev; | |
2050 | unsigned int d = mddev->raid_disks = rs->raid_disks; | |
2051 | sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; | |
2052 | ||
2053 | if (rt_is_raid10(rs->raid_type)) { | |
2054 | if (mddev->level == 0) { | |
2055 | /* Userspace reordered disks -> adjust raid_disk indexes */ | |
2056 | _reorder_raid_disk_indexes(rs); | |
2057 | ||
2058 | /* raid0 -> raid10_far layout */ | |
2059 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, | |
2060 | rs->raid10_copies); | |
2061 | } else if (mddev->level == 1) | |
2062 | /* raid1 -> raid10_near layout */ | |
2063 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, | |
2064 | rs->raid_disks); | |
2065 | else | |
2066 | return -EINVAL; | |
2067 | ||
2068 | } | |
2069 | ||
2070 | clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | |
2071 | mddev->recovery_cp = MaxSector; | |
2072 | ||
2073 | while (d--) { | |
2074 | rdev = &rs->dev[d].rdev; | |
2075 | ||
2076 | if (test_bit(d, (void *) rs->rebuild_disks)) { | |
2077 | clear_bit(In_sync, &rdev->flags); | |
2078 | clear_bit(Faulty, &rdev->flags); | |
2079 | mddev->recovery_cp = rdev->recovery_offset = 0; | |
2080 | /* Bitmap has to be created when we do an "up" takeover */ | |
2081 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); | |
2082 | } | |
2083 | ||
2084 | rdev->new_data_offset = new_data_offset; | |
2085 | } | |
2086 | ||
2087 | rs_set_new(rs); | |
2088 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | |
2089 | ||
2090 | return 0; | |
2091 | } | |
2092 | ||
75b8e04b | 2093 | /* |
48cf06bc HM |
2094 | * Enable/disable discard support on RAID set depending on |
2095 | * RAID level and discard properties of underlying RAID members. | |
75b8e04b | 2096 | */ |
ecbfb9f1 | 2097 | static void configure_discard_support(struct raid_set *rs) |
75b8e04b | 2098 | { |
48cf06bc HM |
2099 | int i; |
2100 | bool raid456; | |
ecbfb9f1 | 2101 | struct dm_target *ti = rs->ti; |
48cf06bc | 2102 | |
75b8e04b HM |
2103 | /* Assume discards not supported until after checks below. */ |
2104 | ti->discards_supported = false; | |
2105 | ||
2106 | /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */ | |
48cf06bc | 2107 | raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); |
75b8e04b | 2108 | |
48cf06bc | 2109 | for (i = 0; i < rs->md.raid_disks; i++) { |
d20c4b08 | 2110 | struct request_queue *q; |
48cf06bc | 2111 | |
d20c4b08 HM |
2112 | if (!rs->dev[i].rdev.bdev) |
2113 | continue; | |
2114 | ||
2115 | q = bdev_get_queue(rs->dev[i].rdev.bdev); | |
48cf06bc HM |
2116 | if (!q || !blk_queue_discard(q)) |
2117 | return; | |
2118 | ||
2119 | if (raid456) { | |
2120 | if (!q->limits.discard_zeroes_data) | |
2121 | return; | |
2122 | if (!devices_handle_discard_safely) { | |
2123 | DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); | |
2124 | DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); | |
2125 | return; | |
2126 | } | |
2127 | } | |
2128 | } | |
2129 | ||
2130 | /* All RAID members properly support discards */ | |
75b8e04b HM |
2131 | ti->discards_supported = true; |
2132 | ||
2133 | /* | |
2134 | * RAID1 and RAID10 personalities require bio splitting, | |
48cf06bc | 2135 | * RAID0/4/5/6 don't and process large discard bios properly. |
75b8e04b | 2136 | */ |
48cf06bc | 2137 | ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); |
75b8e04b HM |
2138 | ti->num_discard_bios = 1; |
2139 | } | |
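/*
 * Usage note (illustrative, not part of the original source): the override
 * mentioned in the DMERR messages above is the module parameter declared at
 * the end of this file, e.g.
 *
 *   modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or, at runtime:
 *
 *   echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */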
2140 | ||
9d09e663 | 2141 | /* |
73c6f239 | 2142 | * Construct a RAID0/1/10/4/5/6 mapping: |
9d09e663 | 2143 | * Args: |
73c6f239 HM |
2144 | * <raid_type> <#raid_params> <raid_params>{0,} \ |
2145 | * <#raid_devs> [<meta_dev1> <dev1>]{1,} | |
9d09e663 | 2146 | * |
9d09e663 N |
2147 | * <raid_params> varies by <raid_type>. See 'parse_raid_params' for |
2148 | * details on possible <raid_params>. | |
73c6f239 HM |
2149 | * |
2150 | * Userspace is free to initialize the metadata devices (and hence the superblocks) | |
2151 | * to enforce recreation based on the passed-in table parameters. | |
2152 | * | |
9d09e663 N |
2153 | */ |
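/*
 * Illustrative table line (device numbers and sizes are made up): a
 * three-device raid5 set with 64-sector chunks and explicit metadata
 * devices could be loaded as
 *
 *   dmsetup create r5 --table \
 *     "0 41943040 raid raid5_ls 1 64 3 254:0 254:1 254:2 254:3 254:4 254:5"
 *
 * i.e. <raid_type> = raid5_ls, <#raid_params> = 1 (just the chunk size of
 * 64 sectors), <#raid_devs> = 3, followed by three <meta_dev data_dev> pairs.
 */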
2154 | static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) | |
2155 | { | |
73c6f239 | 2156 | int r; |
9d09e663 | 2157 | struct raid_type *rt; |
92c83d79 | 2158 | unsigned num_raid_params, num_raid_devs; |
9d09e663 | 2159 | struct raid_set *rs = NULL; |
92c83d79 HM |
2160 | const char *arg; |
2161 | struct dm_arg_set as = { argc, argv }, as_nrd; | |
2162 | struct dm_arg _args[] = { | |
2163 | { 0, as.argc, "Cannot understand number of raid parameters" }, | |
2164 | { 1, 254, "Cannot understand number of raid devices parameters" } | |
2165 | }; | |
2166 | ||
2167 | /* Must have <raid_type> */ | |
2168 | arg = dm_shift_arg(&as); | |
702108d1 HM |
2169 | if (!arg) |
2170 | return ti_error_einval(ti, "No arguments"); | |
9d09e663 | 2171 | |
92c83d79 | 2172 | rt = get_raid_type(arg); |
702108d1 HM |
2173 | if (!rt) |
2174 | return ti_error_einval(ti, "Unrecognised raid_type"); | |
9d09e663 | 2175 | |
92c83d79 HM |
2176 | /* Must have <#raid_params> */ |
2177 | if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error)) | |
2178 | return -EINVAL; | |
9d09e663 | 2179 | |
92c83d79 HM |
2180 | /* number of raid device tuples <meta_dev data_dev> */ | |
2181 | as_nrd = as; | |
2182 | dm_consume_args(&as_nrd, num_raid_params); | |
2183 | _args[1].max = (as_nrd.argc - 1) / 2; | |
2184 | if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error)) | |
2185 | return -EINVAL; | |
9d09e663 | 2186 | |
702108d1 HM |
2187 | if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES)) |
2188 | return ti_error_einval(ti, "Invalid number of supplied raid devices"); | |
3ca5a21a | 2189 | |
92c83d79 | 2190 | rs = context_alloc(ti, rt, num_raid_devs); |
9d09e663 N |
2191 | if (IS_ERR(rs)) |
2192 | return PTR_ERR(rs); | |
2193 | ||
92c83d79 | 2194 | r = parse_raid_params(rs, &as, num_raid_params); |
73c6f239 | 2195 | if (r) |
9d09e663 N |
2196 | goto bad; |
2197 | ||
702108d1 | 2198 | r = parse_dev_params(rs, &as); |
73c6f239 | 2199 | if (r) |
9d09e663 N |
2200 | goto bad; |
2201 | ||
b12d437b | 2202 | rs->md.sync_super = super_sync; |
ecbfb9f1 HM |
2203 | |
2204 | /* | |
2205 | * Backup any new raid set level, layout, ... | |
2206 | * requested to be able to compare to superblock | |
2207 | * members for conversion decisions. | |
2208 | */ | |
2209 | rs_config_backup(rs); | |
2210 | ||
73c6f239 HM |
2211 | r = analyse_superblocks(ti, rs); |
2212 | if (r) | |
b12d437b JB |
2213 | goto bad; |
2214 | ||
9d09e663 | 2215 | INIT_WORK(&rs->md.event_work, do_table_event); |
9d09e663 | 2216 | ti->private = rs; |
55a62eef | 2217 | ti->num_flush_bios = 1; |
9d09e663 | 2218 | |
ecbfb9f1 HM |
2219 | /* Restore any requested new layout for conversion decision */ |
2220 | rs_config_restore(rs); | |
2221 | ||
75b8e04b | 2222 | /* |
ecbfb9f1 HM |
2223 | * If a takeover is needed, just set the level to |
2224 | * the new requested one and allow the raid set to run. | |
75b8e04b | 2225 | */ |
ecbfb9f1 HM |
2226 | if (rs_takeover_requested(rs)) { |
2227 | r = rs_check_takeover(rs); | |
2228 | if (r) | |
2229 | return r; | |
2230 | ||
2231 | r = rs_setup_takeover(rs); | |
2232 | if (r) | |
2233 | return r; | |
2234 | ||
2235 | _set_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | |
2236 | } | |
2237 | ||
2238 | /* Start raid set read-only and assumed clean to change in raid_resume() */ | |
2239 | rs->md.ro = 1; | |
2240 | rs->md.in_sync = 1; | |
2241 | set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); | |
75b8e04b | 2242 | |
0cf45031 HM |
2243 | /* Has to be held on running the array */ |
2244 | mddev_lock_nointr(&rs->md); | |
73c6f239 | 2245 | r = md_run(&rs->md); |
9d09e663 | 2246 | rs->md.in_sync = 0; /* Assume already marked dirty */ |
0cf45031 | 2247 | mddev_unlock(&rs->md); |
9d09e663 | 2248 | |
73c6f239 | 2249 | if (r) { |
9d09e663 N |
2250 | ti->error = "Failed to run raid array"; | |
2251 | goto bad; | |
2252 | } | |
2253 | ||
63f33b8d | 2254 | if (ti->len != rs->md.array_sectors) { |
702108d1 | 2255 | r = ti_error_einval(ti, "Array size does not match requested target length"); |
63f33b8d JB |
2256 | goto size_mismatch; |
2257 | } | |
9d09e663 | 2258 | rs->callbacks.congested_fn = raid_is_congested; |
9d09e663 N |
2259 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
2260 | ||
32737279 | 2261 | mddev_suspend(&rs->md); |
9d09e663 N |
2262 | return 0; |
2263 | ||
63f33b8d JB |
2264 | size_mismatch: |
2265 | md_stop(&rs->md); | |
9d09e663 N |
2266 | bad: |
2267 | context_free(rs); | |
2268 | ||
73c6f239 | 2269 | return r; |
9d09e663 N |
2270 | } |
2271 | ||
2272 | static void raid_dtr(struct dm_target *ti) | |
2273 | { | |
2274 | struct raid_set *rs = ti->private; | |
2275 | ||
2276 | list_del_init(&rs->callbacks.list); | |
2277 | md_stop(&rs->md); | |
2278 | context_free(rs); | |
2279 | } | |
2280 | ||
7de3ee57 | 2281 | static int raid_map(struct dm_target *ti, struct bio *bio) |
9d09e663 N |
2282 | { |
2283 | struct raid_set *rs = ti->private; | |
fd01b88c | 2284 | struct mddev *mddev = &rs->md; |
9d09e663 N |
2285 | |
2286 | mddev->pers->make_request(mddev, bio); | |
2287 | ||
2288 | return DM_MAPIO_SUBMITTED; | |
2289 | } | |
2290 | ||
be83651f JB |
2291 | static const char *decipher_sync_action(struct mddev *mddev) |
2292 | { | |
2293 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) | |
2294 | return "frozen"; | |
2295 | ||
2296 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | |
2297 | (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { | |
2298 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) | |
2299 | return "reshape"; | |
2300 | ||
2301 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { | |
2302 | if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) | |
2303 | return "resync"; | |
2304 | else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) | |
2305 | return "check"; | |
2306 | return "repair"; | |
2307 | } | |
2308 | ||
2309 | if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) | |
2310 | return "recover"; | |
2311 | } | |
2312 | ||
2313 | return "idle"; | |
2314 | } | |
2315 | ||
fd7c092e MP |
2316 | static void raid_status(struct dm_target *ti, status_type_t type, |
2317 | unsigned status_flags, char *result, unsigned maxlen) | |
9d09e663 N |
2318 | { |
2319 | struct raid_set *rs = ti->private; | |
2320 | unsigned raid_param_cnt = 1; /* at least 1 for chunksize */ | |
2321 | unsigned sz = 0; | |
2e727c3c | 2322 | int i, array_in_sync = 0; |
9d09e663 N |
2323 | sector_t sync; |
2324 | ||
2325 | switch (type) { | |
2326 | case STATUSTYPE_INFO: | |
2327 | DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); | |
2328 | ||
676fa5ad | 2329 | if (!rt_is_raid0(rs->raid_type)) { |
0cf45031 HM |
2330 | if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) |
2331 | sync = rs->md.curr_resync_completed; | |
2332 | else | |
2333 | sync = rs->md.recovery_cp; | |
2334 | ||
2335 | if (sync >= rs->md.resync_max_sectors) { | |
2336 | /* | |
2337 | * Sync complete. | |
2338 | */ | |
2339 | array_in_sync = 1; | |
2340 | sync = rs->md.resync_max_sectors; | |
2341 | } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) { | |
2342 | /* | |
2343 | * If "check" or "repair" is occurring, the array has | |
2344 | * undergone an initial sync and the health characters | |
2345 | * should not be 'a' anymore. | |
2346 | */ | |
2347 | array_in_sync = 1; | |
2348 | } else { | |
2349 | /* | |
2350 | * The array may be doing an initial sync, or it may | |
2351 | * be rebuilding individual components. If all the | |
2352 | * devices are In_sync, then it is the array that is | |
2353 | * being initialized. | |
2354 | */ | |
2355 | for (i = 0; i < rs->md.raid_disks; i++) | |
2356 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) | |
2357 | array_in_sync = 1; | |
2358 | } | |
2359 | } else { | |
2360 | /* RAID0 */ | |
2e727c3c | 2361 | array_in_sync = 1; |
9d09e663 | 2362 | sync = rs->md.resync_max_sectors; |
2e727c3c | 2363 | } |
be83651f | 2364 | |
2e727c3c JB |
2365 | /* |
2366 | * Status characters: | |
2367 | * 'D' = Dead/Failed device | |
2368 | * 'a' = Alive but not in-sync | |
2369 | * 'A' = Alive and in-sync | |
2370 | */ | |
2371 | for (i = 0; i < rs->md.raid_disks; i++) { | |
2372 | if (test_bit(Faulty, &rs->dev[i].rdev.flags)) | |
2373 | DMEMIT("D"); | |
2374 | else if (!array_in_sync || | |
2375 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) | |
2376 | DMEMIT("a"); | |
2377 | else | |
2378 | DMEMIT("A"); | |
2379 | } | |
9d09e663 | 2380 | |
2e727c3c JB |
2381 | /* |
2382 | * In-sync ratio: | |
2383 | * The in-sync ratio shows the progress of: | |
2384 | * - Initializing the array | |
2385 | * - Rebuilding a subset of devices of the array | |
2386 | * The user can distinguish between the two by referring | |
2387 | * to the status characters. | |
2388 | */ | |
9d09e663 N |
2389 | DMEMIT(" %llu/%llu", |
2390 | (unsigned long long) sync, | |
2391 | (unsigned long long) rs->md.resync_max_sectors); | |
2392 | ||
be83651f JB |
2393 | /* |
2394 | * Sync action: | |
2395 | * See Documentation/device-mapper/dm-raid.txt for | |
2396 | * information on each of these states. | |
2397 | */ | |
2398 | DMEMIT(" %s", decipher_sync_action(&rs->md)); | |
2399 | ||
2400 | /* | |
2401 | * resync_mismatches/mismatch_cnt | |
2402 | * This field shows the number of discrepancies found when | |
2403 | * performing a "check" of the array. | |
2404 | */ | |
2405 | DMEMIT(" %llu", | |
c4a39551 | 2406 | (strcmp(rs->md.last_sync_action, "check")) ? 0 : |
be83651f JB |
2407 | (unsigned long long) |
2408 | atomic64_read(&rs->md.resync_mismatches)); | |
9d09e663 N |
2409 | break; |
2410 | case STATUSTYPE_TABLE: | |
2411 | /* The string you would use to construct this array */ | |
46bed2b5 | 2412 | for (i = 0; i < rs->md.raid_disks; i++) { |
ad51d7f1 | 2413 | if (_test_flag(CTR_FLAG_REBUILD, rs->ctr_flags) && |
13c87583 | 2414 | rs->dev[i].data_dev && |
9d09e663 | 2415 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) |
13c87583 | 2416 | raid_param_cnt += 2; /* for rebuilds */ |
46bed2b5 JB |
2417 | if (rs->dev[i].data_dev && |
2418 | test_bit(WriteMostly, &rs->dev[i].rdev.flags)) | |
2419 | raid_param_cnt += 2; | |
2420 | } | |
9d09e663 | 2421 | |
c76d53f4 HM |
2422 | raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2); |
2423 | if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)) | |
9d09e663 N |
2424 | raid_param_cnt--; |
2425 | ||
2426 | DMEMIT("%s %u %u", rs->raid_type->name, | |
2427 | raid_param_cnt, rs->md.chunk_sectors); | |
2428 | ||
ad51d7f1 HM |
2429 | if (_test_flag(CTR_FLAG_SYNC, rs->ctr_flags) && |
2430 | rs->md.recovery_cp == MaxSector) | |
9d09e663 | 2431 | DMEMIT(" sync"); |
ad51d7f1 | 2432 | if (_test_flag(CTR_FLAG_NOSYNC, rs->ctr_flags)) |
9d09e663 N |
2433 | DMEMIT(" nosync"); |
2434 | ||
2435 | for (i = 0; i < rs->md.raid_disks; i++) | |
ad51d7f1 | 2436 | if (_test_flag(CTR_FLAG_REBUILD, rs->ctr_flags) && |
13c87583 | 2437 | rs->dev[i].data_dev && |
9d09e663 N |
2438 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) |
2439 | DMEMIT(" rebuild %u", i); | |
2440 | ||
ad51d7f1 | 2441 | if (_test_flag(CTR_FLAG_DAEMON_SLEEP, rs->ctr_flags)) |
9d09e663 N |
2442 | DMEMIT(" daemon_sleep %lu", |
2443 | rs->md.bitmap_info.daemon_sleep); | |
2444 | ||
ad51d7f1 | 2445 | if (_test_flag(CTR_FLAG_MIN_RECOVERY_RATE, rs->ctr_flags)) |
9d09e663 N |
2446 | DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); |
2447 | ||
ad51d7f1 | 2448 | if (_test_flag(CTR_FLAG_MAX_RECOVERY_RATE, rs->ctr_flags)) |
9d09e663 N |
2449 | DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); |
2450 | ||
46bed2b5 JB |
2451 | for (i = 0; i < rs->md.raid_disks; i++) |
2452 | if (rs->dev[i].data_dev && | |
2453 | test_bit(WriteMostly, &rs->dev[i].rdev.flags)) | |
2454 | DMEMIT(" write_mostly %u", i); | |
2455 | ||
ad51d7f1 | 2456 | if (_test_flag(CTR_FLAG_MAX_WRITE_BEHIND, rs->ctr_flags)) |
9d09e663 N |
2457 | DMEMIT(" max_write_behind %lu", |
2458 | rs->md.bitmap_info.max_write_behind); | |
2459 | ||
ad51d7f1 | 2460 | if (_test_flag(CTR_FLAG_STRIPE_CACHE, rs->ctr_flags)) { |
d1688a6d | 2461 | struct r5conf *conf = rs->md.private; |
9d09e663 N |
2462 | |
2463 | /* convert from kiB to sectors */ | |
2464 | DMEMIT(" stripe_cache %d", | |
2465 | conf ? conf->max_nr_stripes * 2 : 0); | |
2466 | } | |
2467 | ||
ad51d7f1 | 2468 | if (_test_flag(CTR_FLAG_REGION_SIZE, rs->ctr_flags)) |
c1084561 JB |
2469 | DMEMIT(" region_size %lu", |
2470 | rs->md.bitmap_info.chunksize >> 9); | |
2471 | ||
ad51d7f1 | 2472 | if (_test_flag(CTR_FLAG_RAID10_COPIES, rs->ctr_flags)) |
63f33b8d JB |
2473 | DMEMIT(" raid10_copies %u", |
2474 | raid10_md_layout_to_copies(rs->md.layout)); | |
2475 | ||
ad51d7f1 | 2476 | if (_test_flag(CTR_FLAG_RAID10_FORMAT, rs->ctr_flags)) |
fe5d2f4a JB |
2477 | DMEMIT(" raid10_format %s", |
2478 | raid10_md_layout_to_format(rs->md.layout)); | |
63f33b8d | 2479 | |
9d09e663 N |
2480 | DMEMIT(" %d", rs->md.raid_disks); |
2481 | for (i = 0; i < rs->md.raid_disks; i++) { | |
b12d437b JB |
2482 | if (rs->dev[i].meta_dev) |
2483 | DMEMIT(" %s", rs->dev[i].meta_dev->name); | |
2484 | else | |
2485 | DMEMIT(" -"); | |
9d09e663 N |
2486 | |
2487 | if (rs->dev[i].data_dev) | |
2488 | DMEMIT(" %s", rs->dev[i].data_dev->name); | |
2489 | else | |
2490 | DMEMIT(" -"); | |
2491 | } | |
2492 | } | |
9d09e663 N |
2493 | } |
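/*
 * Illustrative output (values invented): for a healthy, fully synced
 * three-device raid5 set the STATUSTYPE_INFO branch above emits e.g.
 *
 *   raid5_ls 3 AAA 41943040/41943040 idle 0
 *
 * while the STATUSTYPE_TABLE branch reproduces the parameter portion of
 * the constructor line, e.g.
 *
 *   raid5_ls 1 64 3 254:0 254:1 254:2 254:3 254:4 254:5
 */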
2494 | ||
be83651f JB |
2495 | static int raid_message(struct dm_target *ti, unsigned argc, char **argv) |
2496 | { | |
2497 | struct raid_set *rs = ti->private; | |
2498 | struct mddev *mddev = &rs->md; | |
2499 | ||
2500 | if (!strcasecmp(argv[0], "reshape")) { | |
2501 | DMERR("Reshape not supported."); | |
2502 | return -EINVAL; | |
2503 | } | |
2504 | ||
2505 | if (!mddev->pers || !mddev->pers->sync_request) | |
2506 | return -EINVAL; | |
2507 | ||
2508 | if (!strcasecmp(argv[0], "frozen")) | |
2509 | set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2510 | else | |
2511 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
2512 | ||
2513 | if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { | |
2514 | if (mddev->sync_thread) { | |
2515 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); | |
2516 | md_reap_sync_thread(mddev); | |
2517 | } | |
2518 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || | |
2519 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) | |
2520 | return -EBUSY; | |
2521 | else if (!strcasecmp(argv[0], "resync")) | |
2522 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
2523 | else if (!strcasecmp(argv[0], "recover")) { | |
2524 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); | |
2525 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
2526 | } else { | |
2527 | if (!strcasecmp(argv[0], "check")) | |
2528 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); | |
2529 | else if (strcasecmp(argv[0], "repair")) | |
2530 | return -EINVAL; | |
2531 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | |
2532 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | |
2533 | } | |
2534 | if (mddev->ro == 2) { | |
2535 | /* A write to sync_action is enough to justify | |
2536 | * canceling read-auto mode | |
2537 | */ | |
2538 | mddev->ro = 0; | |
2539 | if (!mddev->suspended) | |
2540 | md_wakeup_thread(mddev->sync_thread); | |
2541 | } | |
2542 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | |
2543 | if (!mddev->suspended) | |
2544 | md_wakeup_thread(mddev->thread); | |
2545 | ||
2546 | return 0; | |
2547 | } | |
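/*
 * Usage note (illustrative, not part of the original source): these
 * messages mirror md's sync_action interface and are sent via the
 * device-mapper message ioctl, e.g.
 *
 *   dmsetup message <mapped_device> 0 check
 *   dmsetup message <mapped_device> 0 idle
 *
 * The resulting state shows up in the sync action field reported by
 * raid_status() above.
 */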
2548 | ||
2549 | static int raid_iterate_devices(struct dm_target *ti, | |
2550 | iterate_devices_callout_fn fn, void *data) | |
9d09e663 N |
2551 | { |
2552 | struct raid_set *rs = ti->private; | |
2553 | unsigned i; | |
73c6f239 | 2554 | int r = 0; |
9d09e663 | 2555 | |
73c6f239 | 2556 | for (i = 0; !r && i < rs->md.raid_disks; i++) |
9d09e663 | 2557 | if (rs->dev[i].data_dev) |
73c6f239 | 2558 | r = fn(ti, |
9d09e663 N |
2559 | rs->dev[i].data_dev, |
2560 | 0, /* No offset on data devs */ | |
2561 | rs->md.dev_sectors, | |
2562 | data); | |
2563 | ||
73c6f239 | 2564 | return r; |
9d09e663 N |
2565 | } |
2566 | ||
2567 | static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) | |
2568 | { | |
2569 | struct raid_set *rs = ti->private; | |
2570 | unsigned chunk_size = rs->md.chunk_sectors << 9; | |
d1688a6d | 2571 | struct r5conf *conf = rs->md.private; |
9d09e663 N |
2572 | |
2573 | blk_limits_io_min(limits, chunk_size); | |
2574 | blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded)); | |
2575 | } | |
2576 | ||
2577 | static void raid_presuspend(struct dm_target *ti) | |
2578 | { | |
2579 | struct raid_set *rs = ti->private; | |
2580 | ||
2581 | md_stop_writes(&rs->md); | |
2582 | } | |
2583 | ||
2584 | static void raid_postsuspend(struct dm_target *ti) | |
2585 | { | |
2586 | struct raid_set *rs = ti->private; | |
2587 | ||
2588 | mddev_suspend(&rs->md); | |
2589 | } | |
2590 | ||
f381e71b | 2591 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) |
9d09e663 | 2592 | { |
9092c02d JB |
2593 | int i; |
2594 | uint64_t failed_devices, cleared_failed_devices = 0; | |
2595 | unsigned long flags; | |
2596 | struct dm_raid_superblock *sb; | |
9092c02d | 2597 | struct md_rdev *r; |
9d09e663 | 2598 | |
f381e71b JB |
2599 | for (i = 0; i < rs->md.raid_disks; i++) { |
2600 | r = &rs->dev[i].rdev; | |
2601 | if (test_bit(Faulty, &r->flags) && r->sb_page && | |
796a5cf0 MC |
2602 | sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0, |
2603 | 1)) { | |
f381e71b JB |
2604 | DMINFO("Faulty %s device #%d has readable super block." |
2605 | " Attempting to revive it.", | |
2606 | rs->raid_type->name, i); | |
a4dc163a JB |
2607 | |
2608 | /* | |
2609 | * Faulty bit may be set, but sometimes the array can | |
2610 | * be suspended before the personalities can respond | |
2611 | * by removing the device from the array (i.e. calling | |
2612 | * 'hot_remove_disk'). If they haven't yet removed | |
2613 | * the failed device, its 'raid_disk' number will be | |
2614 | * '>= 0' - meaning we must call this function | |
2615 | * ourselves. | |
2616 | */ | |
2617 | if ((r->raid_disk >= 0) && | |
2618 | (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) | |
2619 | /* Failed to revive this device, try next */ | |
2620 | continue; | |
2621 | ||
f381e71b JB |
2622 | r->raid_disk = i; |
2623 | r->saved_raid_disk = i; | |
2624 | flags = r->flags; | |
2625 | clear_bit(Faulty, &r->flags); | |
2626 | clear_bit(WriteErrorSeen, &r->flags); | |
2627 | clear_bit(In_sync, &r->flags); | |
2628 | if (r->mddev->pers->hot_add_disk(r->mddev, r)) { | |
2629 | r->raid_disk = -1; | |
2630 | r->saved_raid_disk = -1; | |
2631 | r->flags = flags; | |
2632 | } else { | |
2633 | r->recovery_offset = 0; | |
2634 | cleared_failed_devices |= 1ULL << i; | |
2635 | } | |
2636 | } | |
2637 | } | |
2638 | if (cleared_failed_devices) { | |
2639 | rdev_for_each(r, &rs->md) { | |
2640 | sb = page_address(r->sb_page); | |
2641 | failed_devices = le64_to_cpu(sb->failed_devices); | |
2642 | failed_devices &= ~cleared_failed_devices; | |
2643 | sb->failed_devices = cpu_to_le64(failed_devices); | |
2644 | } | |
2645 | } | |
2646 | } | |
2647 | ||
ecbfb9f1 HM |
2648 | /* Load the dirty region bitmap */ |
2649 | static int _bitmap_load(struct raid_set *rs) | |
2650 | { | |
2651 | int r = 0; | |
2652 | ||
2653 | /* Try loading the bitmap unless "raid0", which does not have one */ | |
2654 | if (!rs_is_raid0(rs) && | |
2655 | !_test_and_set_flag(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { | |
2656 | r = bitmap_load(&rs->md); | |
2657 | if (r) | |
2658 | DMERR("Failed to load bitmap"); | |
2659 | } | |
2660 | ||
2661 | return r; | |
2662 | } | |
2663 | ||
2664 | static int raid_preresume(struct dm_target *ti) | |
2665 | { | |
2666 | struct raid_set *rs = ti->private; | |
2667 | struct mddev *mddev = &rs->md; | |
2668 | ||
2669 | /* This is a resume after a suspend of the set -> it's already started */ | |
2670 | if (_test_and_set_flag(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) | |
2671 | return 0; | |
2672 | ||
2673 | /* | |
2674 | * The superblocks need to be updated on disk if the | |
2675 | * array is new or _bitmap_load will overwrite them | |
2676 | * in core with old data. | |
2677 | * | |
2678 | * In case the array got modified (takeover/reshape/resize) | |
2679 | * or the data offsets on the component devices changed, they | |
2680 | * have to be updated as well. | |
2681 | * | |
2682 | * Have to switch to readwrite and back in order to | |
2683 | * allow for the superblock updates. | |
2684 | */ | |
2685 | if (_test_and_clear_flag(RT_FLAG_UPDATE_SBS, &rs->runtime_flags)) { | |
2686 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | |
2687 | mddev->ro = 0; | |
2688 | md_update_sb(mddev, 1); | |
2689 | mddev->ro = 1; | |
2690 | } | |
2691 | ||
2692 | /* | |
2693 | * Disable/enable discard support on raid set after any | |
2694 | * conversion, because devices can have been added | |
2695 | */ | |
2696 | configure_discard_support(rs); | |
2697 | ||
2698 | /* Load the bitmap from disk unless raid0 */ | |
2699 | return _bitmap_load(rs); | |
2700 | } | |
2701 | ||
f381e71b JB |
2702 | static void raid_resume(struct dm_target *ti) |
2703 | { | |
2704 | struct raid_set *rs = ti->private; | |
ecbfb9f1 | 2705 | struct mddev *mddev = &rs->md; |
f381e71b | 2706 | |
ecbfb9f1 HM |
2707 | if (_test_and_set_flag(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { |
2708 | /* | |
2709 | * A secondary resume while the device is active. | |
2710 | * Take this opportunity to check whether any failed | |
2711 | * devices are reachable again. | |
2712 | */ | |
2713 | attempt_restore_of_faulty_devices(rs); | |
0cf45031 | 2714 | |
ecbfb9f1 HM |
2715 | } else { |
2716 | mddev->in_sync = 0; | |
0cf45031 | 2717 | |
ecbfb9f1 HM |
2718 | /* |
2719 | * If any of the constructor flags got passed in | |
2720 | * but "region_size" (gets always passed in for | |
2721 | * mappings with bitmap), we expect userspace to | |
2722 | * reset them and reload the mapping anyway. | |
2723 | * | |
2725 | * -> don't unfreeze resynchronization until the imminent | |
2726 | * reload of the table w/o these flags | |
2726 | */ | |
2727 | if (!_test_flags(ALL_FREEZE_FLAGS, rs->ctr_flags)) | |
2728 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | |
47525e59 | 2729 | } |
34f8ac6d | 2730 | |
ecbfb9f1 HM |
2731 | mddev->ro = 0; |
2732 | if (mddev->suspended) | |
2733 | mddev_resume(mddev); | |
9d09e663 N |
2734 | } |
2735 | ||
2736 | static struct target_type raid_target = { | |
2737 | .name = "raid", | |
702108d1 | 2738 | .version = {1, 8, 1}, |
9d09e663 N |
2739 | .module = THIS_MODULE, |
2740 | .ctr = raid_ctr, | |
2741 | .dtr = raid_dtr, | |
2742 | .map = raid_map, | |
2743 | .status = raid_status, | |
be83651f | 2744 | .message = raid_message, |
9d09e663 N |
2745 | .iterate_devices = raid_iterate_devices, |
2746 | .io_hints = raid_io_hints, | |
2747 | .presuspend = raid_presuspend, | |
2748 | .postsuspend = raid_postsuspend, | |
ecbfb9f1 | 2749 | .preresume = raid_preresume, |
9d09e663 N |
2750 | .resume = raid_resume, |
2751 | }; | |
2752 | ||
2753 | static int __init dm_raid_init(void) | |
2754 | { | |
fe5d2f4a JB |
2755 | DMINFO("Loading target version %u.%u.%u", |
2756 | raid_target.version[0], | |
2757 | raid_target.version[1], | |
2758 | raid_target.version[2]); | |
9d09e663 N |
2759 | return dm_register_target(&raid_target); |
2760 | } | |
2761 | ||
2762 | static void __exit dm_raid_exit(void) | |
2763 | { | |
2764 | dm_unregister_target(&raid_target); | |
2765 | } | |
2766 | ||
2767 | module_init(dm_raid_init); | |
2768 | module_exit(dm_raid_exit); | |
2769 | ||
48cf06bc HM |
2770 | module_param(devices_handle_discard_safely, bool, 0644); |
2771 | MODULE_PARM_DESC(devices_handle_discard_safely, | |
2772 | "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); | |
2773 | ||
9d09e663 | 2774 | MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target"); |
63f33b8d JB |
2775 | MODULE_ALIAS("dm-raid1"); |
2776 | MODULE_ALIAS("dm-raid10"); | |
9d09e663 N |
2777 | MODULE_ALIAS("dm-raid4"); |
2778 | MODULE_ALIAS("dm-raid5"); | |
2779 | MODULE_ALIAS("dm-raid6"); | |
2780 | MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>"); | |
2781 | MODULE_LICENSE("GPL"); |