/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);

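/*
 * Example: a minimal bio-based map function in the style of a linear
 * target. Illustrative sketch only; "example_ctx" and its "dev"/"start"
 * members are hypothetical.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
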
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

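/*
 * Example: an end_io hook that asks dm core to requeue failed writes.
 * Illustrative sketch only.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error)
 *	{
 *		if (error && bio_data_dir(bio) == WRITE)
 *			return DM_ENDIO_REQUEUE;
 *
 *		return error;
 *	}
 */
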
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

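/*
 * Example: a callout that checks whether one contiguous section of an
 * underlying device supports flush, suitable for combining across
 * devices via iterate_devices. Illustrative sketch only.
 *
 *	static int example_dev_supports_flush(struct dm_target *ti,
 *					      struct dm_dev *dev,
 *					      sector_t start, sector_t len,
 *					      void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && (q->flush_flags & REQ_FLUSH);
 *	}
 *
 * A target could then ask "does at least one device support flush?" with:
 *
 *	ti->type->iterate_devices(ti, example_dev_supports_flush, NULL);
 */
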
typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

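/*
 * Example: opening a destination device from a constructor and releasing
 * it from the destructor. Illustrative sketch only; it reuses the
 * hypothetical "example_ctx" from the map example above.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */
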
/*
 * Information about a target type
 */
struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices. This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is a responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The minimum number of extra bytes allocated in each bio for the
	 * target to use.  dm_per_bio_data returns the data location.
	 */
	unsigned per_bio_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard bios be split
	 * on max_io_len boundaries.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}

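/*
 * Example: using per-bio data. A target reserves space by setting
 * ti->per_bio_data_size in its constructor, then recovers its per-bio
 * state in map/end_io. Illustrative sketch only; "example_per_bio" is
 * hypothetical.
 *
 *	struct example_per_bio {
 *		unsigned long start_jiffies;
 *	};
 *
 * In the constructor:
 *
 *	ti->per_bio_data_size = sizeof(struct example_per_bio);
 *
 * In the map function:
 *
 *	struct example_per_bio *pb =
 *		dm_per_bio_data(bio, sizeof(struct example_per_bio));
 *	pb->start_jiffies = jiffies;
 */
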
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

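/*
 * Example: defining and registering a target type from module init.
 * Illustrative sketch only; the "example" names are hypothetical.
 *
 *	static struct target_type example_target = {
 *		.name	 = "example",
 *		.version = {1, 0, 0},
 *		.module	 = THIS_MODULE,
 *		.ctr	 = example_ctr,
 *		.dtr	 = example_dtr,
 *		.map	 = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */
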
/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);

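/*
 * Example: parsing a bounded numeric argument from a constructor.
 * Illustrative sketch only; "example_parse_args" and the argument
 * meaning are hypothetical.
 *
 *	static int example_parse_args(struct dm_target *ti,
 *				      unsigned argc, char **argv)
 *	{
 *		static struct dm_arg _args[] = {
 *			{0, 1024, "invalid queue depth"},
 *		};
 *		struct dm_arg_set as;
 *		unsigned depth;
 *		int r;
 *
 *		as.argc = argc;
 *		as.argv = argv;
 *
 *		r = dm_read_arg(_args, &as, &depth, &ti->error);
 *		if (r)
 *			return r;
 *
 *		return 0;
 *	}
 */
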
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

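/*
 * Example: building a table in the three steps described above.
 * Illustrative sketch only; "md", "dev_size" and "params" are assumed
 * to be supplied by the caller.
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, dev_size, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 */
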
/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

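/*
 * Example: a status function built on DMEMIT. The macro assumes the
 * surrounding function provides "result" and "maxlen" plus a local "sz"
 * counter. Illustrative sketch only, reusing the hypothetical
 * "example_ctx" from the examples above.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */
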
#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * ceiling(n / size)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

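/*
 * Example: dm_div_up(1000, 512) == 2, so
 * dm_round_up(1000, 512) == 2 * 512 == 1024.
 */
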
#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */