/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)

/* It might be useful to have these defined elsewhere */

#define	U8_MAX	((u8)	(~0U))
#define	U16_MAX	((u16)	(~0U))
#define	U32_MAX	((u32)	(~0U))
#define	U64_MAX	((u64)	(~0ULL))

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	1

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_ALL	(0)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
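/*
 * A quick check of that width bound (illustrative arithmetic, not part
 * of the original source): 5/2 decimal digits per byte is enough
 * because 2^8 < 10^2.5, and the "+ 1" covers a leading minus sign.
 * For a 4-byte int, (5 * 4) / 2 + 1 = 11 characters, which exactly
 * fits the worst case "-2147483648".
 */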
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These four fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 obj_version;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	char		*pool_name;

	char		*image_id;
	char		*image_name;

	u64		snap_id;
	char		*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */

	struct rbd_img_request	*img_request;
	struct list_head	links;		/* img_request->obj_requests */
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	struct bio		*bio_list;
	struct page		**pages;
	u32			page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	u64			version;
	s32			result;
	atomic_t		done;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};

struct rbd_img_request {
	struct request		*rq;
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	bool			write_request;	/* false for read */
	union {
		struct ceph_snap_context *snapc;	/* for writes */
		u64		snap_id;		/* for reads */
	};
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &ireq->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &ireq->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &ireq->obj_requests, links)
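/*
 * Illustrative use of the iterators above (a sketch, not code from the
 * original source): walking an image request's object requests, plus
 * the safe variant used for teardown, mirroring the calls made in
 * rbd_img_request_destroy() later in this file.
 *
 *	struct rbd_obj_request *obj_request;
 *	struct rbd_obj_request *next_obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		dout("obj_request %u\n", obj_request->which);
 *
 *	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
 *		rbd_img_obj_request_del(img_request, obj_request);
 */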
struct rbd_snap {
	struct device		dev;
	const char		*name;
	u64			size;
	struct list_head	node;
	u64			id;
	u64			features;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;

	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_remove_snap_dev(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name	= "rbd",
	.release	= rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("rbd_client_create\n");
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);

	dout("rbd_client_create created %p\n", rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);

	return ERR_PTR(ret);
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			kref_get(&client_node->kref);
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}

	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client.  Called via kref_put() when the last
 * reference is dropped; takes rbd_client_list_lock itself to
 * unlink the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("rbd_release_client %p\n", rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
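/*
 * Working through those limits (illustrative arithmetic, not from the
 * original source): on a 64-bit build SIZE_MAX is 2^64 - 1, so even
 * after subtracting sizeof (struct ceph_snap_context) the id array can
 * hold on the order of 2^64 / 8 = 2^61 snapshot ids.  The checks only
 * really bite on 32-bit builds, where 2^32 / 8 = 2^29 ids is the
 * ceiling before snap_names_len is even considered.
 */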
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			goto out_err;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}

static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}
static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
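/*
 * A minimal userspace sketch of the segment arithmetic above (the
 * obj_order value and the "rb.0.1234" prefix are made up for
 * illustration): an image byte offset decomposes into an object name
 * suffix, an offset within that object, and a length clamped at the
 * object boundary.
 *
 *	#include <stdio.h>
 *	#include <inttypes.h>
 *
 *	int main(void)
 *	{
 *		unsigned obj_order = 22;		// 4 MB objects
 *		uint64_t offset = 6 << 20;		// image byte 6 MB
 *		uint64_t length = 4 << 20;		// want 4 MB
 *		uint64_t segment_size = (uint64_t) 1 << obj_order;
 *		uint64_t segment = offset >> obj_order;
 *		uint64_t obj_off = offset & (segment_size - 1);
 *
 *		if (obj_off + length > segment_size)
 *			length = segment_size - obj_off;  // clamped to 2 MB
 *		printf("rb.0.1234.%012" PRIx64 " off %" PRIu64 " len %"
 *			PRIu64 "\n", segment, obj_off, length);
 *		return 0;	// rb.0.1234.000000000001 off 2097152 len 2097152
 *	}
 */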
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
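/*
 * Illustrative call pattern (a sketch, not code from the original
 * source): carving fixed-size clones out of one source chain.  Both
 * bio_list and bio_offset advance on each call, which is exactly what
 * rbd_img_request_fill_bio() relies on further below.
 *
 *	struct bio *bio_list = ...;	// source chain
 *	unsigned int bio_offset = 0;
 *	struct bio *clone;
 *
 *	clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *				      clone_size, GFP_ATOMIC);
 *	if (!clone)
 *		... // ENOMEM, or the chain ran out of data
 */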
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	rbd_obj_request_get(obj_request);
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
{
	struct ceph_osd_req_op *op;
	va_list args;
	size_t size;

	op = kzalloc(sizeof (*op), GFP_NOIO);
	if (!op)
		return NULL;
	op->op = opcode;
	va_start(args, opcode);
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		/* rbd_osd_req_op_create(READ, offset, length) */
		/* rbd_osd_req_op_create(WRITE, offset, length) */
		op->extent.offset = va_arg(args, u64);
		op->extent.length = va_arg(args, u64);
		if (opcode == CEPH_OSD_OP_WRITE)
			op->payload_len = op->extent.length;
		break;
	case CEPH_OSD_OP_CALL:
		/* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
		op->cls.class_name = va_arg(args, char *);
		size = strlen(op->cls.class_name);
		rbd_assert(size <= (size_t) U8_MAX);
		op->cls.class_len = size;
		op->payload_len = size;

		op->cls.method_name = va_arg(args, char *);
		size = strlen(op->cls.method_name);
		rbd_assert(size <= (size_t) U8_MAX);
		op->cls.method_len = size;
		op->payload_len += size;

		op->cls.indata = va_arg(args, void *);
		size = va_arg(args, size_t);
		rbd_assert(size <= (size_t) U32_MAX);
		op->cls.indata_len = (u32) size;
		op->payload_len += size;
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		/* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
		/* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
		op->watch.cookie = va_arg(args, u64);
		op->watch.ver = va_arg(args, u64);
		op->watch.ver = cpu_to_le64(op->watch.ver);
		if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
			op->watch.flag = (u8) 1;
		break;
	default:
		rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
		kfree(op);
		op = NULL;
		break;
	}
	va_end(args);

	return op;
}

static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
{
	kfree(op);
}
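/*
 * The varargs constructor above is easiest to see by example.  These
 * are sketches of the call patterns used later in this file (the class
 * and method names in the CALL line are made up for illustration):
 *
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_WRITE, offset, length);
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
 *				   method_name, outbound, outbound_size);
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH, cookie, ver, start);
 *
 * Each caller hands the op to rbd_osd_req_create(), which copies its
 * contents, and then immediately calls rbd_osd_req_op_destroy(op).
 */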
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return wait_for_completion_interruptible(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	atomic_set(&obj_request->done, 1);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	u64 xferred;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	xferred = le64_to_cpu(op->extent.length);
	rbd_assert(xferred < (u64) UINT_MAX);
	if (obj_request->result == (s32) -ENOENT) {
		zero_bio_chain(obj_request->bio_list, 0);
		obj_request->result = 0;
	} else if (xferred < obj_request->length && !obj_request->result) {
		zero_bio_chain(obj_request->bio_list, xferred);
		xferred = obj_request->length;
	}
	obj_request->xferred = xferred;
	atomic_set(&obj_request->done, 1);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	obj_request->xferred = le64_to_cpu(op->extent.length);
	atomic_set(&obj_request->done, 1);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct ceph_osd_reply_head *reply_head;
	struct ceph_osd_op *op;
	u32 num_ops;
	u16 opcode;

	rbd_assert(osd_req == obj_request->osd_req);
	rbd_assert(!!obj_request->img_request ^
				(obj_request->which == BAD_WHICH));

	obj_request->xferred = le32_to_cpu(msg->hdr.data_len);
	reply_head = msg->front.iov_base;
	obj_request->result = (s32) le32_to_cpu(reply_head->result);
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	num_ops = le32_to_cpu(reply_head->num_ops);
	WARN_ON(num_ops != 1);	/* For now */

	op = &reply_head->ops[0];
	opcode = le16_to_cpu(op->op);
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request, op);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (atomic_read(&obj_request->done))
		rbd_obj_request_complete(obj_request);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request,
					struct ceph_osd_req_op *op)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	struct timespec now;
	struct timespec *mtime;
	u64 snap_id = CEPH_NOSNAP;
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;

	if (img_request) {
		rbd_assert(img_request->write_request == write_request);
		if (img_request->write_request)
			snapc = img_request->snapc;
		else
			snap_id = img_request->snap_id;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		rbd_assert(obj_request->bio_list != NULL);
		osd_req->r_bio = obj_request->bio_list;
		bio_get(osd_req->r_bio);
		/* osd client requires "num pages" even for bio */
		osd_req->r_num_pages = calc_pages_for(offset, length);
		break;
	case OBJ_REQUEST_PAGES:
		osd_req->r_pages = obj_request->pages;
		osd_req->r_num_pages = obj_request->page_count;
		osd_req->r_page_alignment = offset & ~PAGE_MASK;
		break;
	}

	if (write_request) {
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
		now = CURRENT_TIME;
		mtime = &now;
	} else {
		osd_req->r_flags = CEPH_OSD_FLAG_READ;
		mtime = NULL;	/* not needed for reads */
		offset = 0;	/* These are not used... */
		length = 0;	/* ...for osd read requests */
	}

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	/* osd_req will get its own reference to snapc (if non-null) */

	ceph_osdc_build_request(osd_req, offset, length, 1, op,
				snapc, snap_id, mtime);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	atomic_set(&obj_request->done, 0);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
struct rbd_img_request *rbd_img_request_create(struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
		if (WARN_ON(!snapc)) {
			kfree(img_request);
			return NULL;	/* Shouldn't happen */
		}
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->write_request = write_request;
	if (write_request)
		img_request->snapc = snapc;
	else
		img_request->snap_id = rbd_dev->spec->snap_id;
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request->write_request)
		ceph_put_snap_context(img_request->snapc);

	kfree(img_request);
}
static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
					struct bio *bio_list)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	unsigned int bio_offset;
	u64 image_offset;
	u64 resid;
	u16 opcode;

	opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
					      : CEPH_OSD_OP_READ;
	bio_offset = 0;
	image_offset = img_request->offset;
	rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
	resid = img_request->length;
	while (resid) {
		const char *object_name;
		unsigned int clone_size;
		struct ceph_osd_req_op *op;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, image_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, image_offset);
		length = rbd_segment_length(rbd_dev, image_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length,
						OBJ_REQUEST_BIO);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		rbd_assert(length <= (u64) UINT_MAX);
		clone_size = (unsigned int) length;
		obj_request->bio_list = bio_chain_clone_range(&bio_list,
						&bio_offset, clone_size,
						GFP_ATOMIC);
		if (!obj_request->bio_list)
			goto out_partial;

		/*
		 * Build up the op to use in building the osd
		 * request.  Note that the contents of the op are
		 * copied by rbd_osd_req_create().
		 */
		op = rbd_osd_req_op_create(opcode, offset, length);
		if (!op)
			goto out_partial;
		obj_request->osd_req = rbd_osd_req_create(rbd_dev,
						img_request->write_request,
						obj_request, op);
		rbd_osd_req_op_destroy(op);
		if (!obj_request->osd_req)
			goto out_partial;
		/* status and version are initially zero-filled */

		rbd_img_obj_request_add(img_request, obj_request);

		image_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
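/*
 * Worked example of the loop above (numbers are illustrative): with
 * 4 MB objects (obj_order 22), a 7 MB request starting at image offset
 * 2 MB covers image bytes [2 MB, 9 MB) and is split into three object
 * requests:
 *
 *	<prefix>.000000000000	offset 2 MB	length 2 MB
 *	<prefix>.000000000001	offset 0	length 4 MB
 *	<prefix>.000000000002	offset 0	length 1 MB
 *
 * each carrying a bio chain cloned out of the original request's chain.
 */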
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->rq != NULL);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		unsigned int xferred;
		int result;

		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!atomic_read(&obj_request->done))
			break;

		rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
		xferred = (unsigned int) obj_request->xferred;
		result = (int) obj_request->result;
		if (result)
			rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
				img_request->write_request ? "write" : "read",
				result, xferred);

		more = blk_end_request(img_request->rq, result, xferred);
		which++;
	}
	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
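/*
 * A concrete trace of the completion logic above (illustrative): if an
 * image request holds object requests 0, 1 and 2 and request 1 finishes
 * first, its callback sees which (1) != next_completion (0) and simply
 * returns.  When 0 later finishes, the loop ends both 0 and 1 to the
 * block layer in order, advances next_completion to 2, and the final
 * completion of 2 finishes the whole image request.
 */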
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;

	for_each_obj_request(img_request, obj_request) {
		int ret;

		obj_request->callback = rbd_img_obj_callback;
		ret = rbd_obj_request_submit(osdc, obj_request);
		if (ret)
			return ret;
		/*
		 * The image request has its own reference to each
		 * of its object requests, so we can safely drop the
		 * initial one here.
		 */
		rbd_obj_request_put(obj_request);
	}

	return 0;
}

static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
				   u64 ver, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op *op;
	struct ceph_osd_client *osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	obj_request->callback = rbd_obj_request_put;
	ret = rbd_obj_request_submit(osdc, obj_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	u64 hver;
	int rc;

	if (!rbd_dev)
		return;

	dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
		rbd_dev->header_name, (unsigned long long) notify_id,
		(unsigned int) opcode);
	rc = rbd_dev_refresh(rbd_dev, &hver);
	if (rc)
		rbd_warn(rbd_dev, "got notification but failed to "
			   "update snaps: %d\n", rc);

	rbd_obj_notify_ack(rbd_dev, hver, notify_id);
}

/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op *op;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie,
				rbd_dev->header.obj_version, start);
	if (!op)
		goto out_cancel;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
							obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}
/*
 * Synchronous osd object method call
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     char *inbound,
			     size_t inbound_size,
			     u64 *version)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	struct ceph_osd_req_op *op;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations but they
	 * don't involve object data (so no offset or length).
	 * The result should be placed into the inbound buffer
	 * provided.  They also supply outbound data--parameters for
	 * the object method.  Currently if this is present it will
	 * be a snapshot id.
	 */
	page_count = (u32) calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
					method_name, outbound, outbound_size);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
	ret = ceph_copy_from_page_vector(pages, inbound, 0,
					obj_request->xferred);
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
static void rbd_request_fn(struct request_queue *q)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		result = -EINVAL;
		if (WARN_ON(offset && length > U64_MAX - offset + 1))
			goto end_request;	/* Shouldn't happen */

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill_bio(img_request, rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "obj_request %s result %d\n",
				write_request ? "write" : "read", result);
			__blk_end_request_all(rq, result);
		}
	}
}
/*
 * a queue callback. Makes sure that we don't create a bio that spans
 * multiple osd objects. One exception is single-page bios, which we
 * handle later, in bio_chain_clone_range().
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the partition-relative
	 * bio start sector is to offset relative to the enclosing
	 * device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	if (disk->flags & GENHD_FL_UP)
		del_gendisk(disk);
	if (disk->queue)
		blk_cleanup_queue(disk->queue);
	put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length,
				char *buf, u64 *version)
{
	struct ceph_osd_req_op *op;
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
	ret = ceph_copy_from_page_vector(pages, buf, 0, obj_request->xferred);
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.
 *
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header.  Caller can pass the address
 * of a variable that will be filled in with the version of the
 * header object at the time it was read.
 *
 * Returns a pointer-coded errno if a failure occurs.
 */
static struct rbd_image_header_ondisk *
rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return ERR_PTR(-ENOMEM);

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
				       0, size,
				       (char *) ondisk, version);
		if (ret < 0)
			goto out_err;
		if (WARN_ON((size_t) ret < size)) {
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			ret = -ENXIO;
			goto out_err;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out_err;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	return ondisk;

out_err:
	kfree(ondisk);

	return ERR_PTR(ret);
}

/*
 * reload the ondisk header
 */
static int rbd_read_header(struct rbd_device *rbd_dev,
			   struct rbd_image_header *header)
{
	struct rbd_image_header_ondisk *ondisk;
	u64 ver = 0;
	int ret;

	ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
	if (IS_ERR(ondisk))
		return PTR_ERR(ondisk);
	ret = rbd_header_from_disk(header, ondisk);
	if (ret >= 0)
		header->obj_version = ver;
	kfree(ondisk);

	return ret;
}
static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
	struct rbd_snap *snap;
	struct rbd_snap *next;

	list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
		rbd_remove_snap_dev(snap);
}

static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return;

	size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
	dout("setting size to %llu sectors", (unsigned long long) size);
	rbd_dev->mapping.size = (u64) size;
	set_capacity(rbd_dev->disk, size);
}
/*
 * only read the first part of the ondisk header, without the snaps info
 */
static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	struct rbd_image_header h;

	ret = rbd_read_header(rbd_dev, &h);
	if (ret < 0)
		return ret;

	down_write(&rbd_dev->header_rwsem);

	/* Update image size, and check for resize of mapped image */
	rbd_dev->header.image_size = h.image_size;
	rbd_update_mapping_size(rbd_dev);

	/* rbd_dev->header.object_prefix shouldn't change */
	kfree(rbd_dev->header.snap_sizes);
	kfree(rbd_dev->header.snap_names);
	/* osd requests may still refer to snapc */
	ceph_put_snap_context(rbd_dev->header.snapc);

	if (hver)
		*hver = h.obj_version;
	rbd_dev->header.obj_version = h.obj_version;
	rbd_dev->header.image_size = h.image_size;
	rbd_dev->header.snapc = h.snapc;
	rbd_dev->header.snap_names = h.snap_names;
	rbd_dev->header.snap_sizes = h.snap_sizes;
	/* Free the extra copy of the object prefix */
	WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
	kfree(h.object_prefix);

	ret = rbd_dev_snaps_update(rbd_dev);
	if (!ret)
		ret = rbd_dev_snaps_register(rbd_dev);

	up_write(&rbd_dev->header_rwsem);

	return ret;
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_refresh(rbd_dev, hver);
	else
		ret = rbd_dev_v2_refresh(rbd_dev, hver);
	mutex_unlock(&ctl_mutex);

	return ret;
}

static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}
/*
 * sysfs
 */

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	sector_t size;

	down_read(&rbd_dev->header_rwsem);
	size = get_capacity(rbd_dev->disk);
	up_read(&rbd_dev->header_rwsem);

	return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
}
/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long) rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->major);
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");

	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev, NULL);

	return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};
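/*
 * Illustrative example (see Documentation/ABI/testing/sysfs-bus-rbd):
 * once an image is mapped, the attributes above can be read from user
 * space, e.g.:
 *
 *	$ cat /sys/bus/rbd/devices/0/pool
 *	rbd
 *	$ cat /sys/bus/rbd/devices/0/size
 *	1073741824
 *
 * The device id (0) and the 1 GiB size are hypothetical values.
 */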
static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
static ssize_t rbd_snap_size_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
}
static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}
static ssize_t rbd_snap_features_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long) snap->features);
}
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
static struct attribute *rbd_snap_attrs[] = {
	&dev_attr_snap_size.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_snap_features.attr,
	NULL
};

static struct attribute_group rbd_snap_attr_group = {
	.attrs = rbd_snap_attrs,
};
static void rbd_snap_dev_release(struct device *dev)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
	kfree(snap->name);
	kfree(snap);
}

static const struct attribute_group *rbd_snap_attr_groups[] = {
	&rbd_snap_attr_group,
	NULL
};

static struct device_type rbd_snap_device_type = {
	.groups		= rbd_snap_attr_groups,
	.release	= rbd_snap_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	rbd_spec_put(rbd_spec_get(spec));	/* TEMPORARY */

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	INIT_LIST_HEAD(&rbd_dev->snaps);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_spec_put(rbd_dev->parent_spec);
	kfree(rbd_dev->header_name);
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}
static bool rbd_snap_registered(struct rbd_snap *snap)
{
	bool ret = snap->dev.type == &rbd_snap_device_type;
	bool reg = device_is_registered(&snap->dev);

	rbd_assert(!ret ^ reg);

	return ret;
}
static void rbd_remove_snap_dev(struct rbd_snap *snap)
{
	list_del(&snap->node);
	if (device_is_registered(&snap->dev))
		device_unregister(&snap->dev);
}
static int rbd_register_snap_dev(struct rbd_snap *snap,
				 struct device *parent)
{
	struct device *dev = &snap->dev;
	int ret;

	dev->type = &rbd_snap_device_type;
	dev->parent = parent;
	dev->release = rbd_snap_dev_release;
	dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
	dout("%s: registering device for snapshot %s\n", __func__, snap->name);

	ret = device_register(dev);

	return ret;
}
static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
					   const char *snap_name,
					   u64 snap_id, u64 snap_size,
					   u64 snap_features)
{
	struct rbd_snap *snap;
	int ret;

	snap = kzalloc(sizeof (*snap), GFP_KERNEL);
	if (!snap)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	snap->name = kstrdup(snap_name, GFP_KERNEL);
	if (!snap->name)
		goto err;

	snap->id = snap_id;
	snap->size = snap_size;
	snap->features = snap_features;

	return snap;

err:
	kfree(snap->name);
	kfree(snap);

	return ERR_PTR(ret);
}
static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	*snap_size = rbd_dev->header.snap_sizes[which];
	*snap_features = 0;	/* No features for v1 */

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return snap_name;
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				(char *) &snapid, sizeof (snapid),
				(char *) &size_buf, sizeof (size_buf), NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;

	*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long) snap_id, (unsigned int) *order,
		(unsigned long long) *snap_size);

	return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix",
				NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;
	ret = 0;	/* rbd_obj_method_sync() can return positive */

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + RBD_OBJ_PREFIX_LEN_MAX,
						NULL, GFP_NOIO);
	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				(char *) &snapid, sizeof (snapid),
				(char *) &features_buf, sizeof (features_buf),
				NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_ALL)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long) snap_id,
		(unsigned long long) *snap_features,
		(unsigned long long) le64_to_cpu(features_buf.incompat));

	return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				(char *) &snapid, sizeof (snapid),
				(char *) reply_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	ret = -ERANGE;
	p = reply_buf;
	end = (char *) reply_buf + size;
	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
	if (parent_spec->pool_id == CEPH_NOPOOL)
		goto out;	/* No parent?  No problem. */

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
		goto out_err;

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	rbd_dev->parent_overlap = overlap;
	rbd_dev->parent_spec = parent_spec;
	parent_spec = NULL;	/* rbd_dev now owns this */
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
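/*
 * For reference, the "get_parent" reply decoded above has this layout
 * (a sketch, matching the sequence of decode calls):
 *
 *	__le64	pool_id;	CEPH_NOPOOL when there is no parent
 *	string	image_id;	__le32 length, then that many bytes
 *	__le64	snap_id;
 *	__le64	overlap;	parent bytes visible through the clone
 */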
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = (char *) image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				(char *) reply_buf, size, NULL);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = (char *) reply_buf + size;
	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
/*
 * When a parent image gets probed, we only have the pool, image,
 * and snapshot ids but not the names of any of them.  This call
 * is made later to fill in those names.  It has to be done after
 * rbd_dev_snaps_update() has completed because some of the
 * information (in particular, snapshot name) is not available
 * until then.
 */
static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc;
	const char *name;
	void *reply_buf = NULL;
	int ret;

	if (rbd_dev->spec->pool_name)
		return 0;	/* Already have the names */

	/* Look up the pool name */

	osdc = &rbd_dev->rbd_client->client->osdc;
	name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
	if (!name) {
		rbd_warn(rbd_dev, "there is no pool with id %llu",
			rbd_dev->spec->pool_id);	/* Really a BUG() */
		return -EIO;
	}

	rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
	if (!rbd_dev->spec->pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	name = rbd_dev_image_name(rbd_dev);
	if (name)
		rbd_dev->spec->image_name = (char *) name;
	else
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name. */

	name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
	if (!name) {
		rbd_warn(rbd_dev, "no snapshot with id %llu",
			rbd_dev->spec->snap_id);	/* Really a BUG() */
		ret = -EIO;
		goto out_err;
	}
	rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
	if (!rbd_dev->spec->snap_name) {
		ret = -ENOMEM;
		goto out_err;
	}

	return 0;
out_err:
	kfree(reply_buf);
	kfree(rbd_dev->spec->pool_name);
	rbd_dev->spec->pool_name = NULL;

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext",
				NULL, 0,
				reply_buf, size, ver);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	ret = -ERANGE;
	p = reply_buf;
	end = (char *) reply_buf + size;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;

	size = sizeof (struct ceph_snap_context) +
				snap_count * sizeof (snapc->snaps[0]);
	snapc = kmalloc(size, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&snapc->nref, 1);
	snapc->seq = seq;
	snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long) seq, (unsigned int) snap_count);

	ret = 0;
out:
	kfree(reply_buf);

	return ret;
}
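/*
 * For reference, the "get_snapcontext" reply decoded above is:
 *
 *	__le64	seq;			highest snapshot id so far
 *	__le32	snap_count;
 *	__le64	snaps[snap_count];	snapshot ids, highest id first
 */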
static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	size_t size;
	void *reply_buf;
	__le64 snap_id;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				(char *) &snap_id, sizeof (snap_id),
				reply_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = (char *) reply_buf + size;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out;
	}

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long) le64_to_cpu(snap_id), snap_name);
	kfree(reply_buf);

	return snap_name;
out:
	kfree(reply_buf);

	return ERR_PTR(ret);
}
static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	u64 snap_id;
	u8 order;
	int ret;

	snap_id = rbd_dev->header.snapc->snaps[which];
	ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
	if (ret)
		return ERR_PTR(ret);
	ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
	if (ret)
		return ERR_PTR(ret);

	return rbd_dev_v2_snap_name(rbd_dev, which);
}
static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_info(rbd_dev, which,
					snap_size, snap_features);
	if (rbd_dev->image_format == 2)
		return rbd_dev_v2_snap_info(rbd_dev, which,
					snap_size, snap_features);
	return ERR_PTR(-EINVAL);
}
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	__u8 obj_order;

	down_write(&rbd_dev->header_rwsem);

	/* Grab old order first, to see if it changes */

	obj_order = rbd_dev->header.obj_order;
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;
	if (rbd_dev->header.obj_order != obj_order) {
		ret = -EIO;
		goto out;
	}
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev, hver);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
	if (ret)
		goto out;
	ret = rbd_dev_snaps_update(rbd_dev);
	dout("rbd_dev_snaps_update returned %d\n", ret);
	if (ret)
		goto out;
	ret = rbd_dev_snaps_register(rbd_dev);
	dout("rbd_dev_snaps_register returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}
/*
 * Scan the rbd device's current snapshot list and compare it to the
 * newly-received snapshot context.  Remove any existing snapshots
 * not present in the new snapshot context.  Add a new snapshot for
 * any snapshots in the snapshot context not in the current list.
 * And verify there are no changes to snapshots we already know
 * about.
 *
 * Assumes the snapshots in the snapshot context are sorted by
 * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
 * are also maintained in that order.)
 */
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const u32 snap_count = snapc->num_snaps;
	struct list_head *head = &rbd_dev->snaps;
	struct list_head *links = head->next;
	u32 index = 0;

	dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
	while (index < snap_count || links != head) {
		u64 snap_id;
		struct rbd_snap *snap;
		char *snap_name;
		u64 snap_size = 0;
		u64 snap_features = 0;

		snap_id = index < snap_count ? snapc->snaps[index]
					     : CEPH_NOSNAP;
		snap = links != head ? list_entry(links, struct rbd_snap, node)
				     : NULL;
		rbd_assert(!snap || snap->id != CEPH_NOSNAP);

		if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
			struct list_head *next = links->next;

			/*
			 * A previously-existing snapshot is not in
			 * the new snap context.
			 *
			 * If the now missing snapshot is the one the
			 * image is mapped to, clear its exists flag
			 * so we can avoid sending any more requests
			 * to it.
			 */
			if (rbd_dev->spec->snap_id == snap->id)
				clear_bit(RBD_DEV_FLAG_EXISTS,
						&rbd_dev->flags);
			rbd_remove_snap_dev(snap);
			dout("%ssnap id %llu has been removed\n",
				rbd_dev->spec->snap_id == snap->id ?
							"mapped " : "",
				(unsigned long long) snap->id);

			/* Done with this list entry; advance */

			links = next;
			continue;
		}

		snap_name = rbd_dev_snap_info(rbd_dev, index,
					&snap_size, &snap_features);
		if (IS_ERR(snap_name))
			return PTR_ERR(snap_name);

		dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
			(unsigned long long) snap_id);
		if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
			struct rbd_snap *new_snap;

			/* We haven't seen this snapshot before */

			new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
					snap_id, snap_size, snap_features);
			if (IS_ERR(new_snap)) {
				int err = PTR_ERR(new_snap);

				dout("  failed to add dev, error %d\n", err);

				return err;
			}

			/* New goes before existing, or at end of list */

			dout("  added dev%s\n", snap ? "" : " at end\n");
			if (snap)
				list_add_tail(&new_snap->node, &snap->node);
			else
				list_add_tail(&new_snap->node, head);
		} else {
			/* Already have this one */

			dout("  already present\n");

			rbd_assert(snap->size == snap_size);
			rbd_assert(!strcmp(snap->name, snap_name));
			rbd_assert(snap->features == snap_features);

			/* Done with this list entry; advance */

			links = links->next;
		}

		/* Advance to the next entry in the snapshot context */

		index++;
	}
	dout("%s: done\n", __func__);

	return 0;
}
/*
 * Scan the list of snapshots and register the devices for any that
 * have not already been registered.
 */
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
{
	struct rbd_snap *snap;
	int ret = 0;

	dout("%s called\n", __func__);
	if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
		return -EIO;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!rbd_snap_registered(snap)) {
			ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
			if (ret < 0)
				break;
		}
	}
	dout("%s: returning %d\n", __func__, ret);

	return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}
static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}
/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout("  max dev id has been reset\n");
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
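/*
 * Illustrative example: with *buf pointing at "  rbd foo", the call
 * skips the two leading spaces, leaves *buf pointing at "rbd foo",
 * and returns 3, the length of the token "rbd".
 */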
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
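/*
 * Illustrative example:
 *
 *	const char *buf = "pool image";
 *	size_t len;
 *	char *tok = dup_token(&buf, &len);
 *
 * leaves tok pointing at a new "pool" string (kfree() it when done),
 * sets len to 4, and advances buf to " image".
 */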
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!spec->snap_name)
		goto out_mem;
	*(spec->snap_name + len) = '\0';

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
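/*
 * Illustrative add request (the monitor address and names are made-up
 * values; see Documentation/ABI/testing/sysfs-bus-rbd):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *		> /sys/bus/rbd/add
 *
 * maps the head of image "foo" from pool "rbd" via the monitor at
 * 1.2.3.4:6789; appending a snapshot name as a fifth token would map
 * that snapshot read-only instead.
 */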
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	void *p;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.
	 */
	if (rbd_dev->spec->image_id)
		return 0;

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id",
				NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;
	ret = 0;	/* rbd_obj_method_sync() can return positive */

	p = response;
	rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
						p + RBD_IMAGE_ID_LEN_MAX,
						NULL, GFP_NOIO);
	if (IS_ERR(rbd_dev->spec->image_id)) {
		ret = PTR_ERR(rbd_dev->spec->image_id);
		rbd_dev->spec->image_id = NULL;
	} else {
		dout("image_id is %s\n", rbd_dev->spec->image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
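/*
 * Illustrative example: for an image the user named "foo", the id
 * object is "rbd_id.foo" (RBD_ID_PREFIX followed by the image name),
 * and the "get_id" method on it returns the persistent image id.
 */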
static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;

	/* Version 1 images have no id; empty string is used */

	rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
	if (!rbd_dev->spec->image_id)
		return -ENOMEM;

	/* Record the header object name for this rbd image. */

	size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name) {
		ret = -ENOMEM;
		goto out_err;
	}
	sprintf(rbd_dev->header_name, "%s%s",
		rbd_dev->spec->image_name, RBD_SUFFIX);

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	rbd_dev->image_format = 1;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}
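/*
 * Illustrative example: a format 1 image named "foo" keeps its
 * metadata in the header object "foo.rbd" (RBD_SUFFIX appended).
 */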
static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	u64 ver = 0;

	/*
	 * Image id was filled in by the caller.  Record the header
	 * object name for this rbd image.
	 */
	size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, rbd_dev->spec->image_id);

	/* Get the size and object order for the image */

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context, plus the header version */

	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
	if (ret)
		goto out_err;
	rbd_dev->header.obj_version = ver;

	rbd_dev->image_format = 2;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;
out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
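/*
 * Illustrative example: a format 2 image whose (made-up) image id is
 * "1014b2ae8944a" keeps its metadata in the header object
 * "rbd_header.1014b2ae8944a" (RBD_HEADER_PREFIX plus the image id).
 */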
static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
	int ret;

	/* no need to lock here, as rbd_dev is not registered yet */
	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_probe_update_spec(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_set_mapping(rbd_dev);
	if (ret)
		goto err_out_snaps;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/*
	 * At this point cleanup in the event of an error is the job
	 * of the sysfs code (initiated by rbd_bus_del_dev()).
	 */
	down_write(&rbd_dev->header_rwsem);
	ret = rbd_dev_snaps_register(rbd_dev);
	up_write(&rbd_dev->header_rwsem);
	if (ret)
		goto err_out_bus;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */

	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */

	rbd_bus_del_dev(rbd_dev);

	return ret;
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);

		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t) rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
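/*
 * Illustrative remove request: unmap the device with id 0, which
 * fails with EBUSY while the block device is still open:
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 */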
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
int __init rbd_init(void)
{
	int rc;

	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);
4167 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
4168 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
4169 MODULE_DESCRIPTION("rados block device");
4171 /* following authorship retained from original osdblk.c */
4172 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
4174 MODULE_LICENSE("GPL");