/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
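
/*
 * Example: a 4 KiB request covers 4096 >> SECTOR_SHIFT == 8 sectors,
 * and sector N begins at byte offset N << SECTOR_SHIFT.
 */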
/* It might be useful to have these defined elsewhere */

#define	U8_MAX	((u8) (~0U))
#define	U16_MAX	((u16) (~0U))
#define	U32_MAX	((u32) (~0U))
#define	U64_MAX	((u64) (~0ULL))
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
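
/*
 * Where 510 comes from: a snapshot context carries a small fixed
 * header plus one 64-bit snapshot id per snapshot, so roughly
 * (4096 - header) / 8 =~ 510 ids fit within a single 4KB page.
 */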
#define RBD_SNAP_HEAD_NAME	"-"

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	1

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_ALL	(0)
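
/*
 * Note that RBD_FEATURE_LAYERING is defined above but not included
 * in RBD_FEATURES_ALL, so this client currently advertises support
 * for no optional image features.
 */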
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
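
/*
 * The formula over-estimates the decimal width of an int: each byte
 * contributes at most log10(256) =~ 2.41 digits and 5/2 = 2.5 is a
 * safe upper bound, so a 4-byte int needs at most 10 + 1 = 11
 * characters (ten digits plus a sign).
 */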
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These four fields never change for a given rbd image */
	char *object_prefix;
	u64 features;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;
	u64 *snap_sizes;

	u64 obj_version;
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	char		*pool_name;

	char		*image_id;
	char		*image_name;

	u64		snap_id;
	char		*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */

	struct rbd_img_request	*img_request;
	struct list_head	links;		/* img_request->obj_requests */
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	u64			version;
	s32			result;
	atomic_t		done;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
struct rbd_img_request {
	struct request		*rq;
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	bool			write_request;	/* false for read */
	union {
		struct ceph_snap_context *snapc;	/* for writes */
		u64		snap_id;		/* for reads */
	};
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
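
/*
 * A typical traversal, matching how these macros are used further
 * down in this file:
 *
 *	struct rbd_obj_request *obj_request;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		obj_request->callback = rbd_img_obj_callback;
 *
 * The _safe variant walks the list in reverse and tolerates removal
 * of the current entry, which is why the teardown paths use it.
 */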
struct rbd_snap {
	struct device		dev;
	const char		*name;
	u64			size;
	struct list_head	node;
	u64			id;
	u64			features;
};

struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};
/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

	/* sysfs related */
	struct device		dev;

	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void rbd_remove_snap_dev(struct rbd_snap *snap);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("rbd_client_create\n");
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);

	dout("rbd_client_create created %p\n", rbdc);
	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	return ERR_PTR(ret);
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			kref_get(&client_node->kref);
			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false

static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("rbd_release_client %p\n", rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
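
/*
 * Concretely: each snapshot costs one __le64 id, so the first check
 * caps snap_count at roughly (SIZE_MAX - sizeof(context)) / 8; the
 * second then verifies that the block of NUL-terminated snapshot
 * names fits in the head-room remaining once the id array has been
 * accounted for, so later allocation arithmetic cannot overflow a
 * size_t.
 */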
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t len;
	size_t size;
	u32 i;

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
		return -ENOMEM;
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

	if (snap_count) {
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
			goto out_err;
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
			goto out_err;
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
			goto out_err;
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
	} else {
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;
	}

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);
	if (!header->snapc)
		goto out_err;

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	return 0;

out_err:
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

	return -ENOMEM;
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct rbd_snap *snap;

	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	list_for_each_entry(snap, &rbd_dev->snaps, node)
		if (snap_id == snap->id)
			return snap->name;

	return NULL;
}

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
{
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->spec->snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

			return 0;
		}
	}

	return -ENOENT;
}

static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
{
	int ret;

	if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->spec->snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		ret = 0;
	} else {
		ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
		if (ret < 0)
			goto done;
		rbd_dev->mapping.read_only = true;
	}
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);

done:
	return ret;
}

static void rbd_header_free(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kfree(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}
static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
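
/*
 * Worked example, assuming the default object order of 22 (4 MiB
 * objects): an image I/O at offset 3 MiB for 2 MiB starts 3 MiB into
 * its object, and since 3 + 2 > 4 the length is trimmed to 1 MiB;
 * the remaining 1 MiB becomes a request against the start of the
 * next object.
 */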
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
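
/* With the default obj_order of 22 this is 1 << 22 == 4 MiB. */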
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;
	bio->bi_idx = 0;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	rbd_obj_request_get(obj_request);
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
{
	struct ceph_osd_req_op *op;
	va_list args;
	size_t size;

	op = kzalloc(sizeof (*op), GFP_NOIO);
	if (!op)
		return NULL;
	op->op = opcode;
	va_start(args, opcode);
	switch (opcode) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		/* rbd_osd_req_op_create(READ, offset, length) */
		/* rbd_osd_req_op_create(WRITE, offset, length) */
		op->extent.offset = va_arg(args, u64);
		op->extent.length = va_arg(args, u64);
		if (opcode == CEPH_OSD_OP_WRITE)
			op->payload_len = op->extent.length;
		break;
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_CALL:
		/* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
		op->cls.class_name = va_arg(args, char *);
		size = strlen(op->cls.class_name);
		rbd_assert(size <= (size_t) U8_MAX);
		op->cls.class_len = size;
		op->payload_len = size;

		op->cls.method_name = va_arg(args, char *);
		size = strlen(op->cls.method_name);
		rbd_assert(size <= (size_t) U8_MAX);
		op->cls.method_len = size;
		op->payload_len += size;

		op->cls.argc = 0;
		op->cls.indata = va_arg(args, void *);
		size = va_arg(args, size_t);
		rbd_assert(size <= (size_t) U32_MAX);
		op->cls.indata_len = (u32) size;
		op->payload_len += size;
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		/* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
		/* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
		op->watch.cookie = va_arg(args, u64);
		op->watch.ver = va_arg(args, u64);
		op->watch.ver = cpu_to_le64(op->watch.ver);
		if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
			op->watch.flag = (u8) 1;
		break;
	default:
		rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
		kfree(op);
		op = NULL;
		break;
	}
	va_end(args);

	return op;
}
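
/*
 * Example invocations (these exact forms appear later in this file):
 *
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
 *	op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
 *
 * The trailing arguments must match the per-opcode conventions shown
 * in the comments above; va_arg() provides no type checking.
 */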
static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
{
	kfree(op);
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return wait_for_completion_interruptible(&obj_request->completion);
}

static void obj_request_done_init(struct rbd_obj_request *obj_request)
{
	atomic_set(&obj_request->done, 0);
}
static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	int done;

	done = atomic_inc_return(&obj_request->done);
	if (done > 1) {
		struct rbd_img_request *img_request = obj_request->img_request;
		struct rbd_device *rbd_dev;

		rbd_dev = img_request ? img_request->rbd_dev : NULL;
		rbd_warn(rbd_dev, "obj_request %p was already done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	return atomic_read(&obj_request->done) != 0;
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	u64 xferred;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	xferred = le64_to_cpu(op->extent.length);
	rbd_assert(xferred < (u64) UINT_MAX);
	if (obj_request->result == (s32) -ENOENT) {
		zero_bio_chain(obj_request->bio_list, 0);
		obj_request->result = 0;
	} else if (xferred < obj_request->length && !obj_request->result) {
		zero_bio_chain(obj_request->bio_list, xferred);
		xferred = obj_request->length;
	}
	obj_request->xferred = xferred;
	obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	obj_request->xferred = le64_to_cpu(op->extent.length);
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request,
				struct ceph_osd_op *op)
{
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct ceph_osd_reply_head *reply_head;
	struct ceph_osd_op *op;
	u32 num_ops;
	u16 opcode;

	rbd_assert(osd_req == obj_request->osd_req);
	rbd_assert(!!obj_request->img_request ^
				(obj_request->which == BAD_WHICH));

	obj_request->xferred = le32_to_cpu(msg->hdr.data_len);
	reply_head = msg->front.iov_base;
	obj_request->result = (s32) le32_to_cpu(reply_head->result);
	obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);

	num_ops = le32_to_cpu(reply_head->num_ops);
	WARN_ON(num_ops != 1);	/* For now */

	op = &reply_head->ops[0];
	opcode = le16_to_cpu(op->op);
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request, op);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request, op);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request,
					struct ceph_osd_req_op *op)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	struct timespec now;
	struct timespec *mtime;
	u64 snap_id = CEPH_NOSNAP;
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;

	if (img_request) {
		rbd_assert(img_request->write_request == write_request);
		if (img_request->write_request)
			snapc = img_request->snapc;
		else
			snap_id = img_request->snap_id;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		rbd_assert(obj_request->bio_list != NULL);
		osd_req->r_bio = obj_request->bio_list;
		break;
	case OBJ_REQUEST_PAGES:
		osd_req->r_pages = obj_request->pages;
		osd_req->r_num_pages = obj_request->page_count;
		osd_req->r_page_alignment = offset & ~PAGE_MASK;
		break;
	}

	if (write_request) {
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
		now = CURRENT_TIME;
		mtime = &now;
	} else {
		osd_req->r_flags = CEPH_OSD_FLAG_READ;
		mtime = NULL;	/* not needed for reads */
		offset = 0;	/* These are not used... */
		length = 0;	/* ...for osd read requests */
	}

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	/* osd_req will get its own reference to snapc (if non-null) */

	ceph_osdc_build_request(osd_req, offset, length, 1, op,
				snapc, snap_id, mtime);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
	if (!obj_request)
		return NULL;

	name = (char *)(obj_request + 1);
	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	obj_request_done_init(obj_request);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request);
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
struct rbd_img_request *rbd_img_request_create(struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;

	img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		snapc = ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
		if (WARN_ON(!snapc)) {
			kfree(img_request);
			return NULL;	/* Shouldn't happen */
		}
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->write_request = write_request;
	if (write_request)
		img_request->snapc = snapc;
	else
		img_request->snap_id = rbd_dev->spec->snap_id;
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request->write_request)
		ceph_put_snap_context(img_request->snapc);

	kfree(img_request);
}
static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
					struct bio *bio_list)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	unsigned int bio_offset;
	u64 image_offset;
	u64 resid;
	u16 opcode;

	opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
					      : CEPH_OSD_OP_READ;
	bio_offset = 0;
	image_offset = img_request->offset;
	rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
	resid = img_request->length;
	rbd_assert(resid > 0);
	while (resid) {
		const char *object_name;
		unsigned int clone_size;
		struct ceph_osd_req_op *op;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, image_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, image_offset);
		length = rbd_segment_length(rbd_dev, image_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length,
						OBJ_REQUEST_BIO);
		kfree(object_name);	/* object request has its own copy */
		if (!obj_request)
			goto out_unwind;

		rbd_assert(length <= (u64) UINT_MAX);
		clone_size = (unsigned int) length;
		obj_request->bio_list = bio_chain_clone_range(&bio_list,
						&bio_offset, clone_size,
						GFP_ATOMIC);
		if (!obj_request->bio_list)
			goto out_partial;

		/*
		 * Build up the op to use in building the osd
		 * request.  Note that the contents of the op are
		 * copied by rbd_osd_req_create().
		 */
		op = rbd_osd_req_op_create(opcode, offset, length);
		if (!op)
			goto out_partial;
		obj_request->osd_req = rbd_osd_req_create(rbd_dev,
						img_request->write_request,
						obj_request, op);
		rbd_osd_req_op_destroy(op);
		if (!obj_request->osd_req)
			goto out_partial;
		/* status and version are initially zero-filled */

		rbd_img_obj_request_add(img_request, obj_request);

		image_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	img_request = obj_request->img_request;

	rbd_assert(img_request != NULL);
	rbd_assert(img_request->rq != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		unsigned int xferred;
		int result;

		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;

		rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
		xferred = (unsigned int) obj_request->xferred;
		result = (int) obj_request->result;
		if (result)
			rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
				img_request->write_request ? "write" : "read",
				result, xferred);

		more = blk_end_request(img_request->rq, result, xferred);
		which++;
	}
	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}

static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;

	for_each_obj_request(img_request, obj_request) {
		int ret;

		obj_request->callback = rbd_img_obj_callback;
		ret = rbd_obj_request_submit(osdc, obj_request);
		if (ret)
			return ret;
		/*
		 * The image request has its own reference to each
		 * of its object requests, so we can safely drop the
		 * initial one here.
		 */
		rbd_obj_request_put(obj_request);
	}

	return 0;
}
*rbd_dev
,
1707 u64 ver
, u64 notify_id
)
1709 struct rbd_obj_request
*obj_request
;
1710 struct ceph_osd_req_op
*op
;
1711 struct ceph_osd_client
*osdc
;
1714 obj_request
= rbd_obj_request_create(rbd_dev
->header_name
, 0, 0,
1715 OBJ_REQUEST_NODATA
);
1720 op
= rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK
, notify_id
, ver
);
1723 obj_request
->osd_req
= rbd_osd_req_create(rbd_dev
, false,
1725 rbd_osd_req_op_destroy(op
);
1726 if (!obj_request
->osd_req
)
1729 osdc
= &rbd_dev
->rbd_client
->client
->osdc
;
1730 obj_request
->callback
= rbd_obj_request_put
;
1731 ret
= rbd_obj_request_submit(osdc
, obj_request
);
1734 rbd_obj_request_put(obj_request
);
1739 static void rbd_watch_cb(u64 ver
, u64 notify_id
, u8 opcode
, void *data
)
1741 struct rbd_device
*rbd_dev
= (struct rbd_device
*)data
;
1748 dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
1749 rbd_dev
->header_name
, (unsigned long long) notify_id
,
1750 (unsigned int) opcode
);
1751 rc
= rbd_dev_refresh(rbd_dev
, &hver
);
1753 rbd_warn(rbd_dev
, "got notification but failed to "
1754 " update snaps: %d\n", rc
);
1756 rbd_obj_notify_ack(rbd_dev
, hver
, notify_id
);
/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct ceph_osd_req_op *op;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie,
				rbd_dev->header.obj_version, start);
	if (!op)
		goto out_cancel;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
							obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;

	/*
	 * A watch request is set to linger, so the underlying osd
	 * request won't go away until we unregister it.  We retain
	 * a pointer to the object request during that time (in
	 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}
/*
 * Synchronous osd object method call
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     char *inbound,
			     size_t inbound_size,
			     u64 *version)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	struct ceph_osd_req_op *op;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations but they
	 * don't involve object data (so no offset or length).
	 * The result should be placed into the inbound buffer
	 * provided.  They also supply outbound data--parameters for
	 * the object method.  Currently if this is present it will
	 * be a snapshot id.
	 */
	page_count = (u32) calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
					method_name, outbound, outbound_size);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
	ret = 0;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
static void rbd_request_fn(struct request_queue *q)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;

	while ((rq = blk_fetch_request(q))) {
		bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;

		/* Ignore any non-FS requests that filter through. */

		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}

		/* Ignore/skip any zero-length requests */

		offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);

		if (!length) {
			dout("%s: zero-length request\n", __func__);
			__blk_end_request_all(rq, 0);
			continue;
		}

		spin_unlock_irq(q->queue_lock);

		/* Disallow writes to a read-only device */

		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		}

		/*
		 * Quit early if the mapped snapshot no longer
		 * exists.  It's still possible the snapshot will
		 * have disappeared by the time our request arrives
		 * at the osd, but there's no sense in sending it if
		 * we already know.
		 */
		if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
			dout("request for non-existent snapshot");
			rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
			result = -ENXIO;
			goto end_request;
		}

		result = -EINVAL;
		if (WARN_ON(offset && length > U64_MAX - offset + 1))
			goto end_request;	/* Shouldn't happen */

		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill_bio(img_request, rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "obj_request %s result %d\n",
				write_request ? "write" : "read", result);
			__blk_end_request_all(rq, result);
		}
	}
}
/*
 * a queue callback. Makes sure that we don't create a bio that spans across
 * multiple osd objects. One exception would be with single page bios,
 * which we handle later at bio_chain_clone_range()
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	sector_t sector_offset;
	sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;

	/*
	 * Find how far into its rbd object the partition-relative
	 * bio start sector is to offset relative to the enclosing
	 * device.
	 */
	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	obj_sector_offset = sector_offset & (sectors_per_obj - 1);

	/*
	 * Compute the number of bytes from that offset to the end
	 * of the object.  Account for what's already used by the bio.
	 */
	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;

	/*
	 * Don't send back more than was asked for.  And if the bio
	 * was empty, let the whole thing through because:  "Note
	 * that a block device *must* allow a single page to be
	 * added to an empty bio."
	 */
	rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
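
/*
 * Worked example, again assuming 4 MiB objects (order 22):
 * sectors_per_obj = 1 << (22 - 9) = 8192.  A bio starting at device
 * sector 8000 lies 8000 sectors into its object, so at most
 * (8192 - 8000) << 9 = 98304 bytes may be merged before the bio
 * would cross into the next object.
 */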
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	if (disk->flags & GENHD_FL_UP)
		del_gendisk(disk);
	if (disk->queue)
		blk_cleanup_queue(disk->queue);
	put_disk(disk);
}

static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length,
				char *buf, u64 *version)
{
	struct ceph_osd_req_op *op;
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
	if (!op)
		goto out;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						obj_request, op);
	rbd_osd_req_op_destroy(op);
	if (!obj_request->osd_req)
		goto out;

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t) INT_MAX);
	ret = (int) size;
	if (version)
		*version = obj_request->version;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.
 *
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header.  Caller can pass the address
 * of a variable that will be filled in with the version of the
 * header object at the time it was read.
 *
 * Returns a pointer-coded errno if a failure occurs.
 */
static struct rbd_image_header_ondisk *
rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return ERR_PTR(-ENOMEM);

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
				       0, size,
				       (char *) ondisk, version);
		if (ret < 0)
			goto out_err;
		if (WARN_ON((size_t) ret < size)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out_err;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out_err;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	return ondisk;

out_err:
	kfree(ondisk);

	return ERR_PTR(ret);
}
/*
 * reload the ondisk header
 */
static int rbd_read_header(struct rbd_device *rbd_dev,
			   struct rbd_image_header *header)
{
	struct rbd_image_header_ondisk *ondisk;
	u64 ver = 0;
	int ret;

	ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
	if (IS_ERR(ondisk))
		return PTR_ERR(ondisk);
	ret = rbd_header_from_disk(header, ondisk);
	if (ret >= 0)
		header->obj_version = ver;
	kfree(ondisk);

	return ret;
}

static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
	struct rbd_snap *snap;
	struct rbd_snap *next;

	list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
		rbd_remove_snap_dev(snap);
}

static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return;

	size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
	dout("setting size to %llu sectors", (unsigned long long) size);
	rbd_dev->mapping.size = (u64) size;
	set_capacity(rbd_dev->disk, size);
}
/*
 * only read the first part of the ondisk header, without the snaps info
 */
static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	struct rbd_image_header h;

	ret = rbd_read_header(rbd_dev, &h);
	if (ret < 0)
		return ret;

	down_write(&rbd_dev->header_rwsem);

	/* Update image size, and check for resize of mapped image */
	rbd_dev->header.image_size = h.image_size;
	rbd_update_mapping_size(rbd_dev);

	/* rbd_dev->header.object_prefix shouldn't change */
	kfree(rbd_dev->header.snap_sizes);
	kfree(rbd_dev->header.snap_names);
	/* osd requests may still refer to snapc */
	ceph_put_snap_context(rbd_dev->header.snapc);

	if (hver)
		*hver = h.obj_version;
	rbd_dev->header.obj_version = h.obj_version;
	rbd_dev->header.image_size = h.image_size;
	rbd_dev->header.snapc = h.snapc;
	rbd_dev->header.snap_names = h.snap_names;
	rbd_dev->header.snap_sizes = h.snap_sizes;
	/* Free the extra copy of the object prefix */
	WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
	kfree(h.object_prefix);

	ret = rbd_dev_snaps_update(rbd_dev);
	if (!ret)
		ret = rbd_dev_snaps_register(rbd_dev);

	up_write(&rbd_dev->header_rwsem);

	return ret;
}

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_refresh(rbd_dev, hver);
	else
		ret = rbd_dev_v2_refresh(rbd_dev, hver);
	mutex_unlock(&ctl_mutex);

	return ret;
}
*rbd_dev
)
2299 struct gendisk
*disk
;
2300 struct request_queue
*q
;
2303 /* create gendisk info */
2304 disk
= alloc_disk(RBD_MINORS_PER_MAJOR
);
2308 snprintf(disk
->disk_name
, sizeof(disk
->disk_name
), RBD_DRV_NAME
"%d",
2310 disk
->major
= rbd_dev
->major
;
2311 disk
->first_minor
= 0;
2312 disk
->fops
= &rbd_bd_ops
;
2313 disk
->private_data
= rbd_dev
;
2315 q
= blk_init_queue(rbd_request_fn
, &rbd_dev
->lock
);
2319 /* We use the default size, but let's be explicit about it. */
2320 blk_queue_physical_block_size(q
, SECTOR_SIZE
);
2322 /* set io sizes to object size */
2323 segment_size
= rbd_obj_bytes(&rbd_dev
->header
);
2324 blk_queue_max_hw_sectors(q
, segment_size
/ SECTOR_SIZE
);
2325 blk_queue_max_segment_size(q
, segment_size
);
2326 blk_queue_io_min(q
, segment_size
);
2327 blk_queue_io_opt(q
, segment_size
);
2329 blk_queue_merge_bvec(q
, rbd_merge_bvec
);
2332 q
->queuedata
= rbd_dev
;
2334 rbd_dev
->disk
= disk
;
2336 set_capacity(rbd_dev
->disk
, rbd_dev
->mapping
.size
/ SECTOR_SIZE
);
2349 static struct rbd_device
*dev_to_rbd_dev(struct device
*dev
)
2351 return container_of(dev
, struct rbd_device
, dev
);
2354 static ssize_t
rbd_size_show(struct device
*dev
,
2355 struct device_attribute
*attr
, char *buf
)
2357 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2360 down_read(&rbd_dev
->header_rwsem
);
2361 size
= get_capacity(rbd_dev
->disk
);
2362 up_read(&rbd_dev
->header_rwsem
);
2364 return sprintf(buf
, "%llu\n", (unsigned long long) size
* SECTOR_SIZE
);
2368 * Note this shows the features for whatever's mapped, which is not
2369 * necessarily the base image.
2371 static ssize_t
rbd_features_show(struct device
*dev
,
2372 struct device_attribute
*attr
, char *buf
)
2374 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2376 return sprintf(buf
, "0x%016llx\n",
2377 (unsigned long long) rbd_dev
->mapping
.features
);
2380 static ssize_t
rbd_major_show(struct device
*dev
,
2381 struct device_attribute
*attr
, char *buf
)
2383 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2385 return sprintf(buf
, "%d\n", rbd_dev
->major
);
2388 static ssize_t
rbd_client_id_show(struct device
*dev
,
2389 struct device_attribute
*attr
, char *buf
)
2391 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2393 return sprintf(buf
, "client%lld\n",
2394 ceph_client_id(rbd_dev
->rbd_client
->client
));
2397 static ssize_t
rbd_pool_show(struct device
*dev
,
2398 struct device_attribute
*attr
, char *buf
)
2400 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2402 return sprintf(buf
, "%s\n", rbd_dev
->spec
->pool_name
);
2405 static ssize_t
rbd_pool_id_show(struct device
*dev
,
2406 struct device_attribute
*attr
, char *buf
)
2408 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2410 return sprintf(buf
, "%llu\n",
2411 (unsigned long long) rbd_dev
->spec
->pool_id
);
2414 static ssize_t
rbd_name_show(struct device
*dev
,
2415 struct device_attribute
*attr
, char *buf
)
2417 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2419 if (rbd_dev
->spec
->image_name
)
2420 return sprintf(buf
, "%s\n", rbd_dev
->spec
->image_name
);
2422 return sprintf(buf
, "(unknown)\n");
2425 static ssize_t
rbd_image_id_show(struct device
*dev
,
2426 struct device_attribute
*attr
, char *buf
)
2428 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2430 return sprintf(buf
, "%s\n", rbd_dev
->spec
->image_id
);
2434 * Shows the name of the currently-mapped snapshot (or
2435 * RBD_SNAP_HEAD_NAME for the base image).
2437 static ssize_t
rbd_snap_show(struct device
*dev
,
2438 struct device_attribute
*attr
,
2441 struct rbd_device
*rbd_dev
= dev_to_rbd_dev(dev
);
2443 return sprintf(buf
, "%s\n", rbd_dev
->spec
->snap_name
);
/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");

	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev, NULL);

	return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
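/*
 * Illustrative only: each S_IRUGO attribute above becomes a readable
 * file under the device's sysfs directory, so a mapping can be
 * inspected with, e.g.:
 *
 *	# cat /sys/bus/rbd/devices/0/pool
 *	# cat /sys/bus/rbd/devices/0/size
 *	# cat /sys/bus/rbd/devices/0/current_snap
 */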
static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};

static ssize_t rbd_snap_size_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}

static ssize_t rbd_snap_features_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long) snap->features);
}

static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);

static struct attribute *rbd_snap_attrs[] = {
	&dev_attr_snap_size.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_snap_features.attr,
	NULL
};

static struct attribute_group rbd_snap_attr_group = {
	.attrs = rbd_snap_attrs,
};

static void rbd_snap_dev_release(struct device *dev)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	kfree(snap->name);
	kfree(snap);
}

static const struct attribute_group *rbd_snap_attr_groups[] = {
	&rbd_snap_attr_group,
	NULL
};

static struct device_type rbd_snap_device_type = {
	.groups		= rbd_snap_attr_groups,
	.release	= rbd_snap_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	rbd_spec_put(rbd_spec_get(spec));	/* TEMPORARY */

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
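/*
 * Illustrative sketch of the rbd_spec lifecycle (assuming allocation
 * succeeds); the annotations on the right are not part of the calls:
 *
 *	struct rbd_spec *spec = rbd_spec_alloc();   kref count is 1
 *	rbd_spec_get(spec);                         share with another holder
 *	rbd_spec_put(spec);                         other holder is done
 *	rbd_spec_put(spec);                         last put -> rbd_spec_free()
 */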
struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
				struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	INIT_LIST_HEAD(&rbd_dev->snaps);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->spec = spec;
	rbd_dev->rbd_client = rbdc;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_spec_put(rbd_dev->parent_spec);
	kfree(rbd_dev->header_name);
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}

static bool rbd_snap_registered(struct rbd_snap *snap)
{
	bool ret = snap->dev.type == &rbd_snap_device_type;
	bool reg = device_is_registered(&snap->dev);

	rbd_assert(!ret ^ reg);

	return ret;
}

static void rbd_remove_snap_dev(struct rbd_snap *snap)
{
	list_del(&snap->node);
	if (device_is_registered(&snap->dev))
		device_unregister(&snap->dev);
}

static int rbd_register_snap_dev(struct rbd_snap *snap,
				 struct device *parent)
{
	struct device *dev = &snap->dev;
	int ret;

	dev->type = &rbd_snap_device_type;
	dev->parent = parent;
	dev->release = rbd_snap_dev_release;
	dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
	dout("%s: registering device for snapshot %s\n", __func__, snap->name);

	ret = device_register(dev);

	return ret;
}

static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
					   const char *snap_name,
					   u64 snap_id, u64 snap_size,
					   u64 snap_features)
{
	struct rbd_snap *snap;
	int ret;

	snap = kzalloc(sizeof (*snap), GFP_KERNEL);
	if (!snap)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	snap->name = kstrdup(snap_name, GFP_KERNEL);
	if (!snap->name)
		goto err;

	snap->id = snap_id;
	snap->size = snap_size;
	snap->features = snap_features;

	return snap;

err:
	kfree(snap->name);
	kfree(snap);

	return ERR_PTR(ret);
}
static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	*snap_size = rbd_dev->header.snap_sizes[which];
	*snap_features = 0;	/* No features for v1 */

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return snap_name;
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				(char *) &snapid, sizeof (snapid),
				(char *) &size_buf, sizeof (size_buf), NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;

	*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long) snap_id, (unsigned int) *order,
		(unsigned long long) *snap_size);

	return 0;
}
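/*
 * Worked example (values illustrative): a size_buf reply with order 22
 * and size 0x40000000 describes a 1 GiB image stored in 1 << 22 = 4 MiB
 * objects, i.e. 256 objects in all.
 */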
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix",
				NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + RBD_OBJ_PREFIX_LEN_MAX,
						NULL, GFP_NOIO);
	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				(char *) &snapid, sizeof (snapid),
				(char *) &features_buf, sizeof (features_buf),
				NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_ALL)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long) snap_id,
		(unsigned long long) *snap_features,
		(unsigned long long) le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
					 &rbd_dev->header.features);
}
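/*
 * Illustrative only: since RBD_FEATURES_ALL is 0 in this client, any
 * nonzero incompat bit in the reply (for example RBD_FEATURE_LAYERING,
 * bit value 1) trips the check above and the image is refused rather
 * than mapped with semantics the driver cannot honor.
 */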
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	char *image_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(CEPH_NOSNAP);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				(char *) &snapid, sizeof (snapid),
				(char *) reply_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	ret = -ERANGE;
	p = reply_buf;
	end = (char *) reply_buf + size;
	ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
	if (parent_spec->pool_id == CEPH_NOPOOL)
		goto out;	/* No parent?  No problem. */

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
		goto out_err;

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	parent_spec->image_id = image_id;
	ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	rbd_dev->parent_overlap = overlap;
	rbd_dev->parent_spec = parent_spec;
	parent_spec = NULL;	/* rbd_dev now owns this */
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}

static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = (char *) image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				(char *) reply_buf, size, NULL);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = (char *) reply_buf + size;
	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
/*
 * When a parent image gets probed, we only have the pool, image,
 * and snapshot ids but not the names of any of them.  This call
 * is made later to fill in those names.  It has to be done after
 * rbd_dev_snaps_update() has completed because some of the
 * information (in particular, snapshot name) is not available
 * until then.
 */
static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc;
	const char *name;
	void *reply_buf = NULL;
	int ret;

	if (rbd_dev->spec->pool_name)
		return 0;	/* Already have the names */

	/* Look up the pool name */

	osdc = &rbd_dev->rbd_client->client->osdc;
	name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
	if (!name) {
		rbd_warn(rbd_dev, "there is no pool with id %llu",
			rbd_dev->spec->pool_id);	/* Really a BUG() */
		return -EIO;
	}

	rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
	if (!rbd_dev->spec->pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	name = rbd_dev_image_name(rbd_dev);
	if (name)
		rbd_dev->spec->image_name = (char *) name;
	else
		rbd_warn(rbd_dev, "unable to get image name");

	/* Look up the snapshot name. */

	name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
	if (!name) {
		rbd_warn(rbd_dev, "no snapshot with id %llu",
			rbd_dev->spec->snap_id);	/* Really a BUG() */
		ret = -EIO;
		goto out_err;
	}
	rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
	if (!rbd_dev->spec->snap_name) {
		ret = -ENOMEM;
		goto out_err;
	}

	return 0;
out_err:
	kfree(reply_buf);
	kfree(rbd_dev->spec->pool_name);
	rbd_dev->spec->pool_name = NULL;

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext",
				NULL, 0,
				reply_buf, size, ver);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	ret = -ERANGE;
	p = reply_buf;
	end = (char *) reply_buf + size;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;

	size = sizeof (struct ceph_snap_context) +
			snap_count * sizeof (snapc->snaps[0]);
	snapc = kmalloc(size, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&snapc->nref, 1);
	snapc->seq = seq;
	snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;
	ret = 0;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long) seq, (unsigned int) snap_count);
out:
	kfree(reply_buf);

	return ret;
}
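/*
 * Sizing note: with RBD_MAX_SNAP_COUNT == 510, the reply buffer above
 * is sizeof (__le64) + sizeof (__le32) + 510 * sizeof (__le64)
 * = 8 + 4 + 4080 = 4092 bytes, which is why a maximal snapshot
 * context still fits within a single 4 KB page.
 */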
static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	size_t size;
	void *reply_buf;
	__le64 snap_id;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				(char *) &snap_id, sizeof (snap_id),
				reply_buf, size, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = (char *) reply_buf + size;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out;
	}

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long) le64_to_cpu(snap_id), snap_name);
	kfree(reply_buf);

	return snap_name;
out:
	kfree(reply_buf);

	return ERR_PTR(ret);
}

static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	u64 snap_id;
	u8 order;
	int ret;

	snap_id = rbd_dev->header.snapc->snaps[which];
	ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
	if (ret)
		return ERR_PTR(ret);
	ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
	if (ret)
		return ERR_PTR(ret);

	return rbd_dev_v2_snap_name(rbd_dev, which);
}

static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
		u64 *snap_size, u64 *snap_features)
{
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_info(rbd_dev, which,
					snap_size, snap_features);
	if (rbd_dev->image_format == 2)
		return rbd_dev_v2_snap_info(rbd_dev, which,
					snap_size, snap_features);
	return ERR_PTR(-EINVAL);
}

static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	__u8 obj_order;

	down_write(&rbd_dev->header_rwsem);

	/* Grab old order first, to see if it changes */

	obj_order = rbd_dev->header.obj_order;
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		goto out;
	if (rbd_dev->header.obj_order != obj_order) {
		ret = -EIO;
		goto out;
	}
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev, hver);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
	if (ret)
		goto out;
	ret = rbd_dev_snaps_update(rbd_dev);
	dout("rbd_dev_snaps_update returned %d\n", ret);
	if (ret)
		goto out;
	ret = rbd_dev_snaps_register(rbd_dev);
	dout("rbd_dev_snaps_register returned %d\n", ret);
out:
	up_write(&rbd_dev->header_rwsem);

	return ret;
}
/*
 * Scan the rbd device's current snapshot list and compare it to the
 * newly-received snapshot context.  Remove any existing snapshots
 * not present in the new snapshot context.  Add a new snapshot for
 * any snapshots in the snapshot context not in the current list.
 * And verify there are no changes to snapshots we already know
 * about.
 *
 * Assumes the snapshots in the snapshot context are sorted by
 * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
 * are also maintained in that order.)
 */
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const u32 snap_count = snapc->num_snaps;
	struct list_head *head = &rbd_dev->snaps;
	struct list_head *links = head->next;
	u32 index = 0;

	dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
	while (index < snap_count || links != head) {
		u64 snap_id;
		struct rbd_snap *snap;
		char *snap_name;
		u64 snap_size = 0;
		u64 snap_features = 0;

		snap_id = index < snap_count ? snapc->snaps[index]
					     : CEPH_NOSNAP;
		snap = links != head ? list_entry(links, struct rbd_snap, node)
				     : NULL;
		rbd_assert(!snap || snap->id != CEPH_NOSNAP);

		if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
			struct list_head *next = links->next;

			/*
			 * A previously-existing snapshot is not in
			 * the new snap context.
			 *
			 * If the now missing snapshot is the one the
			 * image is mapped to, clear its exists flag
			 * so we can avoid sending any more requests
			 * to it.
			 */
			if (rbd_dev->spec->snap_id == snap->id)
				clear_bit(RBD_DEV_FLAG_EXISTS,
						&rbd_dev->flags);
			rbd_remove_snap_dev(snap);
			dout("%ssnap id %llu has been removed\n",
				rbd_dev->spec->snap_id == snap->id ?
							"mapped " : "",
				(unsigned long long) snap->id);

			/* Done with this list entry; advance */

			links = next;
			continue;
		}

		snap_name = rbd_dev_snap_info(rbd_dev, index,
					&snap_size, &snap_features);
		if (IS_ERR(snap_name))
			return PTR_ERR(snap_name);

		dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
			(unsigned long long) snap_id);
		if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
			struct rbd_snap *new_snap;

			/* We haven't seen this snapshot before */

			new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
					snap_id, snap_size, snap_features);
			if (IS_ERR(new_snap)) {
				int err = PTR_ERR(new_snap);

				dout("  failed to add dev, error %d\n", err);

				return err;
			}

			/* New goes before existing, or at end of list */

			dout("  added dev%s\n", snap ? "" : " at end\n");
			if (snap)
				list_add_tail(&new_snap->node, &snap->node);
			else
				list_add_tail(&new_snap->node, head);
		} else {
			/* Already have this one */

			dout("  already present\n");

			rbd_assert(snap->size == snap_size);
			rbd_assert(!strcmp(snap->name, snap_name));
			rbd_assert(snap->features == snap_features);

			/* Done with this list entry; advance */

			links = links->next;
		}

		/* Advance to the next entry in the snapshot context */

		index++;
	}
	dout("%s: done\n", __func__);

	return 0;
}
/*
 * Scan the list of snapshots and register the devices for any that
 * have not already been registered.
 */
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
{
	struct rbd_snap *snap;
	int ret = 0;

	dout("%s called\n", __func__);
	if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
		return -EIO;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!rbd_snap_registered(snap)) {
			ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
			if (ret < 0)
				break;
		}
	}
	dout("%s: returning %d\n", __func__, ret);

	return ret;
}

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

	return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}

static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
}
/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;
	int max_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);
		return;
	}

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	max_id = 0;
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
			max_id = rbd_dev->dev_id;
	}
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout("  max dev id has been reset\n");
}
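/*
 * Worked example (illustrative): with ids {1, 2, 3} in use, putting
 * id 3 rescans the list and resets rbd_dev_id_max to 2.  If a
 * concurrent rbd_dev_id_get() already advanced the counter to 4, the
 * cmpxchg above simply fails and the newer maximum is preserved.
 */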
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
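/*
 * Illustrative only: if *buf points at "  1.2.3.4:6789 rw", then
 * next_token(buf) advances *buf past the two leading spaces and
 * returns 12, the length of "1.2.3.4:6789".  Callers such as
 * dup_token() below advance *buf past the token itself afterward.
 */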
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}

/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!spec->snap_name)
		goto out_mem;
	*(spec->snap_name + len) = '\0';

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
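/*
 * Illustrative only (addresses and names made up): a map request in
 * the form this parser accepts would be
 *
 *	# echo "192.168.0.1:6789 name=admin,secret=<key> rbd myimage -" \
 *		> /sys/bus/rbd/add
 *
 * i.e. monitor address(es), ceph/rbd options, pool "rbd", image
 * "myimage", and "-" to map the head rather than a snapshot.
 */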
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	void *p;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.
	 */
	if (rbd_dev->spec->image_id)
		return 0;

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id",
				NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX, NULL);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;
	ret = 0;

	p = response;
	rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
						p + RBD_IMAGE_ID_LEN_MAX,
						NULL, GFP_NOIO);
	if (IS_ERR(rbd_dev->spec->image_id)) {
		ret = PTR_ERR(rbd_dev->spec->image_id);
		rbd_dev->spec->image_id = NULL;
	} else {
		dout("image_id is %s\n", rbd_dev->spec->image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
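/*
 * Illustrative only: for an image the user calls "myimage", the id
 * object constructed above would be "rbd_id.myimage" (assuming the
 * usual RBD_ID_PREFIX of "rbd_id."), and its get_id method returns
 * the persistent id used to name the image's other objects.
 */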
static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;

	/* Version 1 images have no id; empty string is used */

	rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
	if (!rbd_dev->spec->image_id)
		return -ENOMEM;

	/* Record the header object name for this rbd image. */

	size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name) {
		ret = -ENOMEM;
		goto out_err;
	}
	sprintf(rbd_dev->header_name, "%s%s",
		rbd_dev->spec->image_name, RBD_SUFFIX);

	/* Populate rbd image metadata */

	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;

	/* Version 1 images have no parent (no layering) */

	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;

	rbd_dev->image_format = 1;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	return ret;
}

static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	u64 ver = 0;

	/*
	 * Image id was filled in by the caller.  Record the header
	 * object name for this rbd image.
	 */
	size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	sprintf(rbd_dev->header_name, "%s%s",
		RBD_HEADER_PREFIX, rbd_dev->spec->image_id);

	/* Get the size and object order for the image */

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get and check the features for the image */

	ret = rbd_dev_v2_features(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* If the image supports layering, get the parent info */

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* crypto and compression type aren't (yet) supported for v2 images */

	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context, plus the header version */

	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
	if (ret)
		goto out_err;
	rbd_dev->header.obj_version = ver;

	rbd_dev->image_format = 2;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	rbd_dev->parent_overlap = 0;
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
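/*
 * Illustrative only: the two probes look for differently named header
 * objects.  Assuming the usual RBD_SUFFIX (".rbd") and
 * RBD_HEADER_PREFIX ("rbd_header."), a format 1 image called "myimage"
 * keeps its metadata in "myimage.rbd", while a format 2 image with id
 * "1234abcd" keeps it in "rbd_header.1234abcd", so both formats can
 * coexist in one pool.
 */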
static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
{
	int ret;

	/* no need to lock here, as rbd_dev is not registered yet */
	ret = rbd_dev_snaps_update(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_probe_update_spec(rbd_dev);
	if (ret)
		goto err_out_snaps;

	ret = rbd_dev_set_mapping(rbd_dev);
	if (ret)
		goto err_out_snaps;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/*
	 * At this point cleanup in the event of an error is the job
	 * of the sysfs code (initiated by rbd_bus_del_dev()).
	 */
	down_write(&rbd_dev->header_rwsem);
	ret = rbd_dev_snaps_register(rbd_dev);
	up_write(&rbd_dev->header_rwsem);
	if (ret)
		goto err_out_bus;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */

	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */

	rbd_bus_del_dev(rbd_dev);

	return ret;
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);

		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);

	return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t) rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
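/*
 * Illustrative only: a mapping created via /sys/bus/rbd/add is torn
 * down by writing its device id back to the bus, e.g.
 *
 *	# echo 0 > /sys/bus/rbd/remove
 *
 * which fails with -EBUSY while the block device is still open.
 */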
/*
 * create control files in sysfs
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return 0;
}

void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");