/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
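
/*
 * Worked example (illustrative only): a 4096-byte request spans
 * 4096 >> SECTOR_SHIFT == 8 sectors, and sector N begins at byte
 * offset (u64)N << SECTOR_SHIFT.
 */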
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
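
/*
 * Usage sketch (not part of the driver): together these helpers give a
 * saturating reference count on top of an atomic_t, e.g.
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *		... got a reference ...
 *	else
 *		... count was 0 or pinned at INT_MAX; none taken ...
 *
 * rbd_dev_parent_get()/rbd_dev_parent_put() below use exactly this
 * pattern for parent image references.
 */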
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
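
/*
 * Worked example: with 4-byte ints, MAX_INT_FORMAT_WIDTH is
 * (5 * 4) / 2 + 1 == 11 characters, enough for "-2147483648"
 * (ten digits plus a sign), so "rbd" plus any id fits comfortably
 * within DEV_NAME_LEN.
 */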
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
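
/*
 * Typical traversal, as the completion path below uses it (sketch only):
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 *
 * The _safe variant walks in reverse and tolerates removal of the
 * current entry, which is what teardown in rbd_img_request_destroy()
 * needs.
 */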
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
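
/*
 * Illustrative pairing (see rbd_open() below): the REMOVING flag and
 * open_count are checked together under rbd_dev->lock,
 *
 *	spin_lock_irq(&rbd_dev->lock);
 *	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
 *		removing = true;
 *	else
 *		rbd_dev->open_count++;
 *	spin_unlock_irq(&rbd_dev->lock);
 *
 * so an open can never race with the mapping being torn down.
 */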
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
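
/*
 * Worked example (single-major mode): dev_id 3 maps to minor
 * 3 << RBD_SINGLE_MAJOR_PART_SHIFT == 48, leaving minors 49-63 for
 * partitions of rbd3; minor_to_rbd_dev_id() on any of 48..63
 * recovers 3.
 */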
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes out the client from the client list and
 * releases it; takes rbd_client_list_lock itself, so the caller must
 * not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header	*header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
		size_t size;

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
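
/*
 * Example of the descending-order semantics (sketch): with
 * snapc->snaps = { 10, 7, 3 }, looking up snap_id 7 returns index 1,
 * while looking up snap_id 5 returns BAD_SNAP_INDEX.
 */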
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
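
/*
 * Worked example, assuming the common default obj_order of 22
 * (4 MiB objects): an image I/O at byte offset 0x7ff000 of length
 * 0x3000 lands in segment 0x7ff000 >> 22 == 1; rbd_segment_offset()
 * yields 0x7ff000 & 0x3fffff == 0x3ff000, and rbd_segment_length()
 * clamps the length to 0x400000 - 0x3ff000 == 0x1000, the bytes left
 * in that object.  The remaining 0x2000 bytes fall in segment 2.
 */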
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
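
/*
 * Usage sketch (illustrative only): splitting a request's bio chain
 * into per-object pieces looks roughly like
 *
 *	struct bio *bio = bio_chain_clone_range(&bio_list, &bio_offset,
 *						(unsigned int)length,
 *						GFP_NOIO);
 *
 * called once per object, with bio_list/bio_offset advancing past the
 * cloned range on each call; the image request fill path later in
 * this file drives exactly this loop.
 */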
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}
*obj_request
)
1410 if (test_and_set_bit(OBJ_REQ_DONE
, &obj_request
->flags
)) {
1411 struct rbd_device
*rbd_dev
= NULL
;
1413 if (obj_request_img_data_test(obj_request
))
1414 rbd_dev
= obj_request
->img_request
->rbd_dev
;
1415 rbd_warn(rbd_dev
, "obj_request %p already marked done",
1420 static bool obj_request_done_test(struct rbd_obj_request
*obj_request
)
1423 return test_bit(OBJ_REQ_DONE
, &obj_request
->flags
) != 0;
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 */
static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	int ret;

	dout("%s %p\n", __func__, obj_request);

	ret = wait_for_completion_interruptible(&obj_request->completion);
	if (ret < 0) {
		dout("%s %p interrupted\n", __func__, obj_request);
		rbd_obj_request_end(obj_request);
		return ret;
	}

	dout("%s %p done\n", __func__, obj_request);
	return 0;
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission.  num_ops is the number of osd operations
 * already added to the osd request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if ((offset + length == object_size)) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
				       offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}
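/*
 * Illustrative note (not part of the original source): num_ops is the
 * index of the first free op slot.  The two call sites in this file
 * have the shape sketched below.
 */
#if 0
/* Ordinary object request: ops start at index 0. */
rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

/*
 * Copyup request: the CEPH_OSD_OP_CALL "copyup" op already occupies
 * index 0, so the data op(s) start at index 1.
 */
rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
#endif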
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);
	op_type = rbd_img_request_op_type(img_request);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_NOIO);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

		rbd_img_request_get(img_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
		obj_request->type == OBJ_REQUEST_NODATA);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	enum obj_operation_type op_type;
	u32 page_count;
	int img_result;
	u64 parent_length;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;
	page_count = img_request->copyup_page_count;
	rbd_assert(page_count);
	img_request->copyup_page_count = 0;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(obj_request_type_valid(orig_request->type));
	img_result = img_request->result;
	parent_length = img_request->length;
	rbd_assert(parent_length == img_request->xferred);
	rbd_img_request_put(img_request);

	rbd_assert(orig_request->img_request);
	rbd_dev = orig_request->img_request->rbd_dev;
	rbd_assert(rbd_dev);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;
	orig_request->copyup_page_count = page_count;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);

	/* Add the other op(s) */

	op_type = rbd_img_request_op_type(orig_request->img_request);
	rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;

	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (obj_request_known_test(obj_request))
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}
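/*
 * Illustrative overview (not part of the original source): a layered
 * write whose target object may be missing flows through
 *
 *	rbd_img_obj_request_submit()
 *	  -> rbd_img_obj_exists_submit()		(STAT the target)
 *	  -> rbd_img_obj_exists_callback()
 *	  -> rbd_img_obj_parent_read_full()		(read parent extent)
 *	  -> rbd_img_obj_parent_read_full_callback()	(copyup + original op)
 *	  -> rbd_img_obj_copyup_callback()
 */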
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}
/*
 * Send a (un)watch request and wait for the ack.  Return a request
 * with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
						struct rbd_device *rbd_dev,
						bool watch)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
					     OBJ_REQUEST_NODATA);
	if (!obj_request)
		return ERR_PTR(-ENOMEM);

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
						  obj_request);
	if (!obj_request->osd_req) {
		ret = -ENOMEM;
		goto out;
	}

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
			      rbd_dev->watch_event->cookie, 0, watch);
	rbd_osd_req_format_write(obj_request);

	if (watch)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;

	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret) {
		if (watch)
			rbd_obj_request_end(obj_request);
		goto out;
	}

	return obj_request;

out:
	rbd_obj_request_put(obj_request);
	return ERR_PTR(ret);
}
/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(!rbd_dev->watch_event);
	rbd_assert(!rbd_dev->watch_request);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
				     &rbd_dev->watch_event);
	if (ret < 0)
		return ret;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
	if (IS_ERR(obj_request)) {
		ceph_osdc_cancel_event(rbd_dev->watch_event);
		rbd_dev->watch_event = NULL;
		return PTR_ERR(obj_request);
	}

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to it.
	 * We'll drop that reference after we've unregistered it in
	 * rbd_dev_header_unwatch_sync().
	 */
	rbd_dev->watch_request = obj_request;

	return 0;
}

/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct rbd_obj_request *obj_request;

	rbd_assert(rbd_dev->watch_event);
	rbd_assert(rbd_dev->watch_request);

	rbd_obj_request_end(rbd_dev->watch_request);
	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
	if (!IS_ERR(obj_request))
		rbd_obj_request_put(obj_request);
	else
		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
			 PTR_ERR(obj_request));

	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
}
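/*
 * Illustrative sketch (compiled out; not part of the driver): the watch
 * registration lifecycle as driven by device setup and teardown.  The
 * wrapper name is hypothetical.
 */
#if 0
static int example_watch_lifecycle(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_header_watch_sync(rbd_dev);	/* register; lingers */
	if (ret)
		return ret;

	/* ... rbd_watch_cb() now fires on header changes and refreshes ... */

	rbd_dev_header_unwatch_sync(rbd_dev);		/* unregister, clean up */
	return 0;
}
#endif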
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
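/*
 * Illustrative sketch (compiled out; not part of the driver): the shape
 * of a method call, modeled on _rbd_dev_v2_snap_size() below.  The
 * "rbd" class and "get_size" method names come from the calls later in
 * this file; the surrounding scaffolding here is hypothetical.
 */
#if 0
struct {
	u8 order;
	__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
int ret;

ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
			  "rbd", "get_size",
			  &snapid, sizeof (snapid),		/* outbound */
			  &size_buf, sizeof (size_buf));	/* inbound */
#endif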
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

	if (rq->cmd_flags & REQ_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (rq->cmd_flags & REQ_WRITE)
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	/* Only reads are allowed to a read-only device */

	if (op_type != OBJ_OP_READ) {
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
		}
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
					     snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
	if (result)
		goto err_img_request;

	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;

	return;

err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, result);
}
static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}
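/*
 * Illustrative note (not part of the original source): rbd_queue_rq()
 * runs in blk-mq dispatch context, where sleeping is not allowed, so
 * the per-request work_struct (reserved via tag_set.cmd_size in
 * rbd_init_disk() below) is queued on rbd_wq and the request is
 * actually serviced by rbd_queue_workfn(), which may block.
 */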
/*
 * A queue callback.  Makes sure that we don't create a bio that spans
 * across multiple osd objects.  One exception would be a single-page
 * bio, which we handle later in bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the bio's
	 * partition-relative start sector falls, relative to the
	 * enclosing device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)

{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags =
		BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
	q->limits.discard_zeroes_data = 1;

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
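/*
 * Illustrative usage (not part of the source; device id and values
 * hypothetical): once a device is mapped, these attributes appear under
 * /sys/bus/rbd/devices/<id>/, e.g.:
 *
 *	# cat /sys/bus/rbd/devices/0/size
 *	10737418240
 *	# echo 1 > /sys/bus/rbd/devices/0/refresh
 */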
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
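/*
 * Illustrative sketch (compiled out; not part of the driver): rbd_spec
 * reference counting.  A spec starts with one reference; each sharer
 * takes its own.
 */
#if 0
struct rbd_spec *spec = rbd_spec_alloc();	/* refcount == 1 */
struct rbd_spec *shared = rbd_spec_get(spec);	/* refcount == 2 */

rbd_spec_put(shared);	/* refcount == 1 */
rbd_spec_put(spec);	/* refcount == 0, rbd_spec_free() runs */
#endif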
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	rbd_dev->flags = 0;
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	u32 which;
	bool found = false;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
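/*
 * Illustrative sketch (compiled out; not part of the driver): resolving
 * a user-supplied snapshot name, as rbd_spec_fill_snap_id() does below.
 * The snapshot name "mysnap" is hypothetical.
 */
#if 0
u64 snap_id = rbd_snap_id_by_name(rbd_dev, "mysnap");

if (snap_id == CEPH_NOSNAP)
	return -ENOENT;		/* no such snapshot, or lookup failed */
#endif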
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
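/*
 * Illustrative sketch, not driver code: the "get_snapcontext" reply
 * decoded above carries the snapshot context in this wire layout,
 * which is why the buffer reserved for it is sizeof(__le64) +
 * sizeof(__le32) + RBD_MAX_SNAP_COUNT * sizeof(__le64):
 */
#if 0
struct rbd_snapcontext_wire_example {
	__le64 seq;		/* maximum snapshot id issued so far */
	__le32 snap_count;	/* number of ids that follow */
	__le64 snaps[];		/* snap_count snapshot ids */
} __attribute__ ((packed));
#endif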
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);

	return ret;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
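/*
 * Illustrative sketch, not driver code: how next_token() behaves on a
 * typical add-command buffer.
 */
#if 0
static void next_token_example(void)
{
	const char *buf = "  1.2.3.4:6789 name=admin";
	size_t len = next_token(&buf);

	/* buf now points at "1.2.3.4:6789 name=admin"; len == 12 */
	BUG_ON(len != 12 || *buf != '1');
}
#endif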
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
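/*
 * Illustrative sketch, not driver code: dup_token() hands back a
 * NUL-terminated copy that the caller owns and must kfree().
 */
#if 0
static void dup_token_example(void)
{
	const char *buf = "rbd myimage -";
	size_t len;
	char *pool = dup_token(&buf, &len);

	/* pool == "rbd", len == 3, buf now points at " myimage -" */
	kfree(pool);
}
#endif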
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
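/*
 * Illustrative sketch, not driver code: a typical write to
 * /sys/bus/rbd/add that this parser accepts (addresses, key and names
 * are made up):
 *
 *	echo "1.2.3.4:6789 name=admin,secret=XYZ rbd myimage -" \
 *		> /sys/bus/rbd/add
 *
 * yields mon_addrs "1.2.3.4:6789", options "name=admin,secret=XYZ",
 * pool_name "rbd", image_name "myimage" and snap_name "-" (the head).
 */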
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	u64 newest_epoch;
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
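/*
 * Illustrative sketch, not driver code: the id object probed above is
 * named by prefixing the user-visible image name with RBD_ID_PREFIX
 * ("rbd_id.", from rbd_types.h).
 */
#if 0
static void rbd_id_object_name_example(void)
{
	char name[32];

	sprintf(name, "%s%s", RBD_ID_PREFIX, "myimage");
	/* name == "rbd_id.myimage"; absent for format 1 images */
}
#endif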
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
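/*
 * Illustrative sketch, not driver code: for an image named "myimage"
 * with v2 image id "102ae8944a", the header object names built above
 * are (RBD_SUFFIX and RBD_HEADER_PREFIX come from rbd_types.h):
 *
 *	format 1:  "myimage.rbd"
 *	format 2:  "rbd_header.102ae8944a"
 */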
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (mapping)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (mapping && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
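/*
 * Illustrative sketch, not driver code: removal is requested by
 * writing a device id to sysfs, e.g. for /dev/rbd1:
 *
 *	echo 1 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is still open, and
 * with -ENOENT if no mapped device has that id.
 */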
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");