/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>

#include "rbd_types.h"
#define RBD_DEBUG       /* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT    9
#define SECTOR_SIZE     (1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
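/*
 * Usage sketch (illustrative only, not part of the driver): the two
 * helpers above form a saturating reference count.  The parent_ref
 * handling further below pairs them like this:
 *
 *      if (atomic_inc_return_safe(&ref) > 0) {
 *              ... use the referenced object ...
 *              if (atomic_dec_return_safe(&ref) < 0)
 *                      pr_warn("reference underflow\n");
 *      }
 *
 * Once a counter has over- or underflowed, both helpers pin it and
 * keep returning -EINVAL instead of letting it wrap.
 */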
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR            256
#define RBD_SINGLE_MAJOR_PART_SHIFT     4

#define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
#define RBD_MAX_SNAP_NAME_LEN \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT      510     /* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME      "-"

#define BAD_SNAP_INDEX  U32_MAX         /* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX  (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX    64

#define RBD_OBJ_PREFIX_LEN_MAX  64

/* Feature bits */

#define RBD_FEATURE_LAYERING    (1<<0)
#define RBD_FEATURE_STRIPINGV2  (1<<1)
#define RBD_FEATURES_ALL \
            (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED  (RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN            32
#define MAX_INT_FORMAT_WIDTH    ((5 * sizeof (int)) / 2 + 1)
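/*
 * Worked example (not from the original source): with a 4-byte int,
 * MAX_INT_FORMAT_WIDTH is (5 * 4) / 2 + 1 == 11.  Each byte
 * contributes at most log10(256) ~= 2.41 < 2.5 decimal digits, so
 * 10 digits cover any 32-bit value and the extra character leaves
 * room for a sign; "-2147483648" is exactly 11 characters.
 */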
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH       U32_MAX         /* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* posn image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};
enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};
#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
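/*
 * Illustrative use of the iteration macros above (a sketch, not code
 * from this file): summing per-object transfer counts, as the image
 * completion path below does:
 *
 *      struct rbd_obj_request *obj_request;
 *      u64 xferred = 0;
 *
 *      for_each_obj_request(img_request, obj_request)
 *              xferred += obj_request->xferred;
 */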
struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        int                     minor;
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        atomic_t                parent_ref;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;

        unsigned long           open_count;     /* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);      /* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);              /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache        *rbd_img_request_cache;
static struct kmem_cache        *rbd_obj_request_cache;
static struct kmem_cache        *rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
                                    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
                                       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
        return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
        return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
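/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * device id 3 maps to minor 3 << 4 == 48, and minors 48..63 belong to
 * that device, i.e. each device gets 2^4 == 16 minors for partitions
 * in single-major mode.  minor_to_rbd_dev_id(51) recovers dev_id 3.
 */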
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
static struct attribute *rbd_bus_attrs[] = {
        &bus_attr_add.attr,
        &bus_attr_remove.attr,
        &bus_attr_add_single_major.attr,
        &bus_attr_remove_single_major.attr,
        NULL,
};
static umode_t rbd_bus_is_visible(struct kobject *kobj,
                                  struct attribute *attr, int index)
{
        if (!single_major &&
            (attr == &bus_attr_add_single_major.attr ||
             attr == &bus_attr_remove_single_major.attr))
                return 0;

        return attr->mode;
}
static const struct attribute_group rbd_bus_group = {
        .attrs = rbd_bus_attrs,
        .is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_groups     = rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)                                                \
                if (unlikely(!(expr))) {                                \
                        printk(KERN_ERR "\nAssertion failure in %s() "  \
                                                "at line %d:\n\n"       \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                        BUG();                                          \
                }
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);

static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);

        return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        put_device(&rbd_dev->dev);
}
static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_rbdc;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_client;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;
out_client:
        ceph_destroy_client(rbdc->client);
out_rbdc:
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT   false
static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
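/*
 * Example (illustrative): mapping an image with "-o ro" (or the long
 * spelling "read_only") drives the parser above as follows:
 *
 *      match_token("ro", rbd_opts_tokens, argstr) == Opt_read_only
 *
 * which lands in the Opt_read_only case and sets rbd_opts->read_only
 * to true; "rw"/"read_write" take the Opt_read_write case instead.
 */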
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);
        mutex_unlock(&client_mutex);

        return rbdc;
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
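/*
 * Worked example (illustrative): for a snapshot array kept in
 * descending order such as { 40, 30, 20, 10 }, looking up id 20
 * returns index 2, while id 25 finds no match:
 *
 *      rbd_dev_snap_index(rbd_dev, 20) == 2
 *      rbd_dev_snap_index(rbd_dev, 25) == BAD_SNAP_INDEX
 *
 * The reversed comparator above is what lets bsearch() work on a
 * descending array.
 */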
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;
        const char *snap_name;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return ERR_PTR(-ENOENT);

        snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
        return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;     /* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;
        char *name_format;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        name_format = "%s.%012llx";
        if (rbd_dev->image_format == 2)
                name_format = "%s.%016llx";
        ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}
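/*
 * Worked example (illustrative): with obj_order 22 the segment size
 * is 1 << 22 == 4 MiB.  Image byte offset 0x12345678 then falls in
 * segment 0x12345678 >> 22 == 0x48, at in-segment offset 0x345678,
 * and for a format 1 image with object_prefix "rb.0.1234"
 * rbd_segment_name() yields "rb.0.1234.000000000048".  A 64 KiB
 * request starting there still fits in that segment, so
 * rbd_segment_length() returns the full 64 KiB.
 */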
static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned long flags;
        void *buf;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, iter) {
                        if (pos + bv.bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(&bv, &flags);
                                memset(buf + remainder, 0,
                                       bv.bv_len - remainder);
                                flush_dcache_page(bv.bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv.bv_len;
                }

                chain = chain->bi_next;
        }
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = offset & ~PAGE_MASK;
                length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                flush_dcache_page(*page);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bio;

        bio = bio_clone(bio_src, gfpmask);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio_advance(bio, offset);
        bio->bi_iter.bi_size = len;

        return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_iter.bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_iter.bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
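/*
 * Usage sketch (illustrative): cloning an image request's data one
 * object at a time, as rbd_img_request_fill() does below.  The in-out
 * parameters let consecutive calls pick up exactly where the previous
 * clone ended:
 *
 *      struct bio *bio_list = ...;     (source chain)
 *      unsigned int bio_offset = 0;
 *
 *      first  = bio_chain_clone_range(&bio_list, &bio_offset, len0, gfp);
 *      second = bio_chain_clone_range(&bio_list, &bio_offset, len1, gfp);
 */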
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}
static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}
static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better offhand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the entire
         * length of the request.  A short read also implies zero-fill
         * to the end of the request.  An error requires the whole
         * length of the request to be reported finished with an error
         * to the block layer.  In each case we update the xferred
         * count to indicate the whole request was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
        }
        obj_request->xferred = length;
        obj_request_done_set(obj_request);
}
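/*
 * Worked example (illustrative): a 4 MiB read of which the OSD
 * returned only 1 MiB (xferred < length, result == 0) has its final
 * 3 MiB zero-filled, and a read that hit a hole (-ENOENT) is
 * zero-filled in full with the result reset to 0.  Either way
 * xferred is reported as the full 4 MiB to the block layer.
 */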
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);

        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
                /* fall through */
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        unsigned int num_ops,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        rbd_assert(num_ops == 1 || (write_request && num_ops == 2));

        /* Allocate and initialize the request, for the num_ops ops */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
                                          GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
        ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

        return osd_req;
}
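/*
 * Illustrative op layout (a sketch, not from the original source):
 * after rbd_osd_req_create() and the fill code below, a data write
 * carries ops[0] == CEPH_OSD_OP_SETALLOCHINT and
 * ops[1] == CEPH_OSD_OP_WRITE, while a read carries a single
 * CEPH_OSD_OP_READ in ops[0].  The completion callback above
 * dispatches on ops[0].op accordingly.
 */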
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_assert(img_request_write_test(img_request));

        /* Allocate and initialize the request, for the three ops */

        snapc = img_request->snapc;
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
        ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

        return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
        ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
                                                u64 offset, u64 length,
                                                enum obj_request_type type)
{
        struct rbd_obj_request *obj_request;
        size_t size;
        char *name;

        rbd_assert(obj_request_type_valid(type));

        size = strlen(object_name) + 1;
        name = kmalloc(size, GFP_KERNEL);
        if (!name)
                return NULL;

        obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
        if (!obj_request) {
                kfree(name);
                return NULL;
        }

        obj_request->object_name = memcpy(name, object_name, size);
        obj_request->offset = offset;
        obj_request->length = length;
        obj_request->flags = 0;
        obj_request->which = BAD_WHICH;
        obj_request->type = type;
        INIT_LIST_HEAD(&obj_request->links);
        init_completion(&obj_request->completion);
        kref_init(&obj_request->kref);

        dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
                offset, length, (int)type, obj_request);

        return obj_request;
}
*kref
)
1856 struct rbd_obj_request
*obj_request
;
1858 obj_request
= container_of(kref
, struct rbd_obj_request
, kref
);
1860 dout("%s: obj %p\n", __func__
, obj_request
);
1862 rbd_assert(obj_request
->img_request
== NULL
);
1863 rbd_assert(obj_request
->which
== BAD_WHICH
);
1865 if (obj_request
->osd_req
)
1866 rbd_osd_req_destroy(obj_request
->osd_req
);
1868 rbd_assert(obj_request_type_valid(obj_request
->type
));
1869 switch (obj_request
->type
) {
1870 case OBJ_REQUEST_NODATA
:
1871 break; /* Nothing to do */
1872 case OBJ_REQUEST_BIO
:
1873 if (obj_request
->bio_list
)
1874 bio_chain_put(obj_request
->bio_list
);
1876 case OBJ_REQUEST_PAGES
:
1877 if (obj_request
->pages
)
1878 ceph_release_page_vector(obj_request
->pages
,
1879 obj_request
->page_count
);
1883 kfree(obj_request
->object_name
);
1884 obj_request
->object_name
= NULL
;
1885 kmem_cache_free(rbd_obj_request_cache
, obj_request
);
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
        rbd_dev_remove_parent(rbd_dev);
        rbd_spec_put(rbd_dev->parent_spec);
        rbd_dev->parent_spec = NULL;
        rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return;

        counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
        if (counter > 0)
                return;

        /* Last reference; clean up parent data structures */

        if (!counter)
                rbd_dev_unparent(rbd_dev);
        else
                rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return false;

        counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
        if (counter > 0 && rbd_dev->parent_overlap)
                return true;

        /* Image was flattened, but parent is not yet torn down */

        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow\n");

        return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
                                        bool write_request)
{
        struct rbd_img_request *img_request;

        img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
        if (!img_request)
                return NULL;

        if (write_request) {
                down_read(&rbd_dev->header_rwsem);
                ceph_get_snap_context(rbd_dev->header.snapc);
                up_read(&rbd_dev->header_rwsem);
        }

        img_request->rq = NULL;
        img_request->rbd_dev = rbd_dev;
        img_request->offset = offset;
        img_request->length = length;
        img_request->flags = 0;
        if (write_request) {
                img_request_write_set(img_request);
                img_request->snapc = rbd_dev->header.snapc;
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
        if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
        img_request->callback = NULL;
        img_request->result = 0;
        img_request->obj_request_count = 0;
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);

        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);

        return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
        struct rbd_img_request *img_request;
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        img_request = container_of(kref, struct rbd_img_request, kref);

        dout("%s: img %p\n", __func__, img_request);

        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);

        if (img_request_layered_test(img_request)) {
                img_request_layered_clear(img_request);
                rbd_dev_parent_put(img_request->rbd_dev);
        }

        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);

        kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
                                        struct rbd_obj_request *obj_request,
                                        u64 img_offset, u64 length)
{
        struct rbd_img_request *parent_request;
        struct rbd_device *rbd_dev;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;

        parent_request = rbd_img_request_create(rbd_dev->parent,
                                                img_offset, length, false);
        if (!parent_request)
                return NULL;

        img_request_child_set(parent_request);
        rbd_obj_request_get(obj_request);
        parent_request->obj_request = obj_request;

        return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
        struct rbd_img_request *parent_request;
        struct rbd_obj_request *orig_request;

        parent_request = container_of(kref, struct rbd_img_request, kref);
        orig_request = parent_request->obj_request;

        parent_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        img_request_child_clear(parent_request);

        rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        unsigned int xferred;
        int result;
        bool more;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
        xferred = (unsigned int)obj_request->xferred;
        result = obj_request->result;
        if (result) {
                struct rbd_device *rbd_dev = img_request->rbd_dev;

                rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
                        img_request_write_test(img_request) ? "write" : "read",
                        obj_request->length, obj_request->img_offset,
                        obj_request->offset);
                rbd_warn(rbd_dev, "  result %d xferred %x\n",
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
        }

        /* Image object requests don't own their page array */

        if (obj_request->type == OBJ_REQUEST_PAGES) {
                obj_request->pages = NULL;
                obj_request->page_count = 0;
        }

        if (img_request_child_test(img_request)) {
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
                more = blk_end_request(img_request->rq, result, xferred);
        }

        return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        u32 which = obj_request->which;
        bool more = true;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
        rbd_assert(img_request != NULL);
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);

        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
                goto out;

        for_each_obj_request_from(img_request, obj_request) {
                rbd_assert(more);
                rbd_assert(which < img_request->obj_request_count);

                if (!obj_request_done_test(obj_request))
                        break;
                more = rbd_img_obj_end_request(obj_request);
                which++;
        }

        rbd_assert(more ^ (which == img_request->obj_request_count));
        img_request->next_completion = which;
out:
        spin_unlock_irq(&img_request->completion_lock);

        if (!more)
                rbd_img_request_complete(img_request);
}
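/*
 * Worked example (illustrative): with three object requests 0..2
 * completing in the order 2, 0, 1, the callback for 2 finds
 * next_completion == 0 and returns early; the callback for 0 then
 * retires 0 (1 is not done yet, so the scan stops there and
 * next_completion becomes 1); the callback for 1 finally retires
 * both 1 and the already-done 2.  Completions are thus always
 * delivered to the block layer in index order.
 */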
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
                                        enum obj_request_type type,
                                        void *data_desc)
{
        struct rbd_device *rbd_dev = img_request->rbd_dev;
        struct rbd_obj_request *obj_request = NULL;
        struct rbd_obj_request *next_obj_request;
        bool write_request = img_request_write_test(img_request);
        struct bio *bio_list = NULL;
        unsigned int bio_offset = 0;
        struct page **pages = NULL;
        u64 img_offset;
        u64 resid;
        u16 opcode;

        dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
                (int)type, data_desc);

        opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
        img_offset = img_request->offset;
        resid = img_request->length;
        rbd_assert(resid > 0);

        if (type == OBJ_REQUEST_BIO) {
                bio_list = data_desc;
                rbd_assert(img_offset ==
                           bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
        } else {
                rbd_assert(type == OBJ_REQUEST_PAGES);
                pages = data_desc;
        }

        while (resid) {
                struct ceph_osd_request *osd_req;
                const char *object_name;
                u64 offset;
                u64 length;
                unsigned int which = 0;

                object_name = rbd_segment_name(rbd_dev, img_offset);
                if (!object_name)
                        goto out_unwind;
                offset = rbd_segment_offset(rbd_dev, img_offset);
                length = rbd_segment_length(rbd_dev, img_offset, resid);
                obj_request = rbd_obj_request_create(object_name,
                                                offset, length, type);
                /* object request has its own copy of the object name */
                rbd_segment_name_free(object_name);
                if (!obj_request)
                        goto out_unwind;

                /*
                 * set obj_request->img_request before creating the
                 * osd_request so that it gets the right snapc
                 */
                rbd_img_obj_request_add(img_request, obj_request);

                if (type == OBJ_REQUEST_BIO) {
                        unsigned int clone_size;

                        rbd_assert(length <= (u64)UINT_MAX);
                        clone_size = (unsigned int)length;
                        obj_request->bio_list =
                                        bio_chain_clone_range(&bio_list,
                                                                &bio_offset,
                                                                clone_size,
                                                                GFP_ATOMIC);
                        if (!obj_request->bio_list)
                                goto out_unwind;
                } else {
                        unsigned int page_count;

                        obj_request->pages = pages;
                        page_count = (u32)calc_pages_for(offset, length);
                        obj_request->page_count = page_count;
                        if ((offset + length) & ~PAGE_MASK)
                                page_count--;   /* more on last page */
                        pages += page_count;
                }

                osd_req = rbd_osd_req_create(rbd_dev, write_request,
                                             (write_request ? 2 : 1),
                                             obj_request);
                if (!osd_req)
                        goto out_unwind;
                obj_request->osd_req = osd_req;
                obj_request->callback = rbd_img_obj_callback;

                if (write_request) {
                        osd_req_op_alloc_hint_init(osd_req, which,
                                             rbd_obj_bytes(&rbd_dev->header),
                                             rbd_obj_bytes(&rbd_dev->header));
                        which++;
                }

                osd_req_op_extent_init(osd_req, which, opcode, offset, length,
                                       0, 0);
                if (type == OBJ_REQUEST_BIO)
                        osd_req_op_extent_osd_data_bio(osd_req, which,
                                        obj_request->bio_list, length);
                else
                        osd_req_op_extent_osd_data_pages(osd_req, which,
                                        obj_request->pages, length,
                                        offset & ~PAGE_MASK, false, false);

                if (write_request)
                        rbd_osd_req_format_write(obj_request);
                else
                        rbd_osd_req_format_read(obj_request);

                obj_request->img_offset = img_offset;

                img_offset += length;
                resid -= length;
        }

        return 0;

out_unwind:
        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);

        return -ENOMEM;
}
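/*
 * Illustrative sketch (not built): how a byte offset into the image maps
 * to a segment for the function above, assuming the usual power-of-two
 * object size (1 << obj_order).  The helper name is hypothetical; the
 * driver's real helpers are rbd_segment_name(), rbd_segment_offset()
 * and rbd_segment_length().
 */
#if 0
static void example_segment_math(u64 img_offset, u64 resid, u8 obj_order)
{
        u64 obj_size = (u64)1 << obj_order;        /* e.g. 4 MiB, order 22 */
        u64 seg_num = img_offset >> obj_order;     /* which object */
        u64 seg_off = img_offset & (obj_size - 1); /* offset within it */
        u64 seg_len = min(resid, obj_size - seg_off); /* clipped length */

        pr_info("segment %llu: offset %llu, length %llu\n",
                seg_num, seg_off, seg_len);
}
#endif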
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;

        rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);

        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev);

        pages = obj_request->copyup_pages;
        rbd_assert(pages != NULL);
        obj_request->copyup_pages = NULL;
        page_count = obj_request->copyup_page_count;
        rbd_assert(page_count);
        obj_request->copyup_page_count = 0;
        ceph_release_page_vector(pages, page_count);

        /*
         * We want the transfer count to reflect the size of the
         * original write request.  There is no such thing as a
         * successful short write, so if the request was successful
         * we can just set it to the originally-requested length.
         */
        if (!obj_request->result)
                obj_request->xferred = obj_request->length;

        /* Finish up with the normal image object callback */

        rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
        struct rbd_obj_request *orig_request;
        struct ceph_osd_request *osd_req;
        struct ceph_osd_client *osdc;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;
        int img_result;
        u64 parent_length;
        u64 offset;
        u64 length;

        rbd_assert(img_request_child_test(img_request));

        /* First get what we need from the image request */

        pages = img_request->copyup_pages;
        rbd_assert(pages != NULL);
        img_request->copyup_pages = NULL;
        page_count = img_request->copyup_page_count;
        rbd_assert(page_count);
        img_request->copyup_page_count = 0;

        orig_request = img_request->obj_request;
        rbd_assert(orig_request != NULL);
        rbd_assert(obj_request_type_valid(orig_request->type));
        img_result = img_request->result;
        parent_length = img_request->length;
        rbd_assert(parent_length == img_request->xferred);
        rbd_img_request_put(img_request);

        rbd_assert(orig_request->img_request);
        rbd_dev = orig_request->img_request->rbd_dev;
        rbd_assert(rbd_dev);

        /*
         * If the overlap has become 0 (most likely because the
         * image has been flattened) we need to free the pages
         * and re-submit the original write request.
         */
        if (!rbd_dev->parent_overlap) {
                struct ceph_osd_client *osdc;

                ceph_release_page_vector(pages, page_count);
                osdc = &rbd_dev->rbd_client->client->osdc;
                img_result = rbd_obj_request_submit(osdc, orig_request);
                if (!img_result)
                        return;
        }

        if (img_result)
                goto out_err;

        /*
         * The original osd request is of no use to use any more.
         * We need a new one that can hold the three ops in a copyup
         * request.  Allocate the new copyup osd request for the
         * original request, and release the old one.
         */
        img_result = -ENOMEM;
        osd_req = rbd_osd_req_create_copyup(orig_request);
        if (!osd_req)
                goto out_err;
        rbd_osd_req_destroy(orig_request->osd_req);
        orig_request->osd_req = osd_req;
        orig_request->copyup_pages = pages;
        orig_request->copyup_page_count = page_count;

        /* Initialize the copyup op */

        osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
        osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
                                                false, false);

        /* Then the hint op */

        osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
                                   rbd_obj_bytes(&rbd_dev->header));

        /* And the original write request op */

        offset = orig_request->offset;
        length = orig_request->length;
        osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
                                        offset, length, 0, 0);
        if (orig_request->type == OBJ_REQUEST_BIO)
                osd_req_op_extent_osd_data_bio(osd_req, 2,
                                        orig_request->bio_list, length);
        else
                osd_req_op_extent_osd_data_pages(osd_req, 2,
                                        orig_request->pages, length,
                                        offset & ~PAGE_MASK, false, false);

        rbd_osd_req_format_write(orig_request);

        /* All set, send it off. */

        orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, orig_request);
        if (!img_result)
                return;
out_err:
        /* Record the error code and complete the request */

        orig_request->result = img_result;
        orig_request->xferred = 0;
        obj_request_done_set(orig_request);
        rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
 * entire target of the given object request.  This is used for
 * satisfying a layered image write request when the target of an
 * object request from the image request does not exist.
 *
 * A page array big enough to hold the returned data is allocated
 * and supplied to rbd_img_request_fill() as the "data descriptor."
 * When the read completes, this page array will be transferred to
 * the original object request for the copyup operation.
 *
 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_img_request *parent_request = NULL;
        struct rbd_device *rbd_dev;
        u64 img_offset;
        u64 length;
        struct page **pages = NULL;
        u32 page_count;
        int result;

        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request_type_valid(obj_request->type));

        img_request = obj_request->img_request;
        rbd_assert(img_request != NULL);
        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev->parent != NULL);

        /*
         * Determine the byte range covered by the object in the
         * child image to which the original request was to be sent.
         */
        img_offset = obj_request->img_offset - obj_request->offset;
        length = (u64)1 << rbd_dev->header.obj_order;

        /*
         * There is no defined parent data beyond the parent
         * overlap, so limit what we read at that boundary if
         * necessary.
         */
        if (img_offset + length > rbd_dev->parent_overlap) {
                rbd_assert(img_offset < rbd_dev->parent_overlap);
                length = rbd_dev->parent_overlap - img_offset;
        }

        /*
         * Allocate a page array big enough to receive the data read
         * from the parent.
         */
        page_count = (u32)calc_pages_for(0, length);
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
        if (IS_ERR(pages)) {
                result = PTR_ERR(pages);
                pages = NULL;
                goto out_err;
        }

        result = -ENOMEM;
        parent_request = rbd_parent_request_create(obj_request,
                                                img_offset, length);
        if (!parent_request)
                goto out_err;

        result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
        if (result)
                goto out_err;
        parent_request->copyup_pages = pages;
        parent_request->copyup_page_count = page_count;

        parent_request->callback = rbd_img_obj_parent_read_full_callback;
        result = rbd_img_request_submit(parent_request);
        if (!result)
                return 0;

        parent_request->copyup_pages = NULL;
        parent_request->copyup_page_count = 0;
        parent_request->obj_request = NULL;
        rbd_obj_request_put(obj_request);
out_err:
        if (pages)
                ceph_release_page_vector(pages, page_count);
        if (parent_request)
                rbd_img_request_put(parent_request);
        obj_request->result = result;
        obj_request->xferred = 0;
        obj_request_done_set(obj_request);

        return result;
}
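/*
 * Illustrative sketch (not built): the overlap clamping performed above,
 * with concrete (hypothetical) numbers.  With 4 MiB objects and a parent
 * overlap of 6 MiB, an object covering image bytes [4 MiB, 8 MiB) has
 * only 2 MiB of defined parent data to read.
 */
#if 0
static u64 example_clamp_to_overlap(u64 img_offset, u64 length,
                                    u64 parent_overlap)
{
        /* e.g. img_offset = 4 MiB, length = 4 MiB, overlap = 6 MiB */
        if (img_offset + length > parent_overlap)
                length = parent_overlap - img_offset;   /* -> 2 MiB */

        return length;
}
#endif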
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_obj_request *orig_request;
        struct rbd_device *rbd_dev;
        int result;

        rbd_assert(!obj_request_img_data_test(obj_request));

        /*
         * All we need from the object request is the original
         * request and the result of the STAT op.  Grab those, then
         * we're done with the request.
         */
        orig_request = obj_request->obj_request;
        obj_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        rbd_assert(orig_request);
        rbd_assert(orig_request->img_request);

        result = obj_request->result;
        obj_request->result = 0;

        dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
                obj_request, orig_request, result,
                obj_request->xferred, obj_request->length);
        rbd_obj_request_put(obj_request);

        /*
         * If the overlap has become 0 (most likely because the
         * image has been flattened) we need to free the pages
         * and re-submit the original write request.
         */
        rbd_dev = orig_request->img_request->rbd_dev;
        if (!rbd_dev->parent_overlap) {
                struct ceph_osd_client *osdc;

                osdc = &rbd_dev->rbd_client->client->osdc;
                result = rbd_obj_request_submit(osdc, orig_request);
                if (!result)
                        return;
        }

        /*
         * Our only purpose here is to determine whether the object
         * exists, and we don't want to treat the non-existence as
         * an error.  If something else comes back, transfer the
         * error to the original request and complete it now.
         */
        if (!result) {
                obj_request_existence_set(orig_request, true);
        } else if (result == -ENOENT) {
                obj_request_existence_set(orig_request, false);
        } else if (result) {
                orig_request->result = result;
                goto out;
        }

        /*
         * Resubmit the original request now that we have recorded
         * whether the target object exists.
         */
        orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
        if (orig_request->result)
                rbd_obj_request_complete(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
        struct rbd_obj_request *stat_request;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct page **pages = NULL;
        u32 page_count;
        size_t size;
        int ret;

        /*
         * The response data for a STAT call consists of:
         *     le64 length;
         *     struct {
         *         le32 tv_sec;
         *         le32 tv_nsec;
         *     } mtime;
         */
        size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
        page_count = (u32)calc_pages_for(0, size);
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ret = -ENOMEM;
        stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
                                                        OBJ_REQUEST_PAGES);
        if (!stat_request)
                goto out;

        rbd_obj_request_get(obj_request);
        stat_request->obj_request = obj_request;
        stat_request->pages = pages;
        stat_request->page_count = page_count;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;
        stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                                                   stat_request);
        if (!stat_request->osd_req)
                goto out;
        stat_request->callback = rbd_img_obj_exists_callback;

        osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
        osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
                                        false, false);
        rbd_osd_req_format_read(stat_request);

        osdc = &rbd_dev->rbd_client->client->osdc;
        ret = rbd_obj_request_submit(osdc, stat_request);
out:
        if (ret)
                rbd_obj_request_put(obj_request);

        return ret;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        bool known;

        rbd_assert(obj_request_img_data_test(obj_request));

        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_dev = img_request->rbd_dev;

        /*
         * Only writes to layered images need special handling.
         * Reads and non-layered writes are simple object requests.
         * Layered writes that start beyond the end of the overlap
         * with the parent have no parent data, so they too are
         * simple object requests.  Finally, if the target object is
         * known to already exist, its parent data has already been
         * copied, so a write to the object can also be handled as a
         * simple object request.
         */
        if (!img_request_write_test(img_request) ||
                !img_request_layered_test(img_request) ||
                rbd_dev->parent_overlap <= obj_request->img_offset ||
                ((known = obj_request_known_test(obj_request)) &&
                        obj_request_exists_test(obj_request))) {

                struct rbd_device *rbd_dev;
                struct ceph_osd_client *osdc;

                rbd_dev = obj_request->img_request->rbd_dev;
                osdc = &rbd_dev->rbd_client->client->osdc;

                return rbd_obj_request_submit(osdc, obj_request);
        }

        /*
         * It's a layered write.  The target object might exist but
         * we may not know that yet.  If we know it doesn't exist,
         * start by reading the data for the full target object from
         * the parent so we can use it for a copyup to the target.
         */
        if (known)
                return rbd_img_obj_parent_read_full(obj_request);

        /* We don't know whether the target exists.  Go find out. */

        return rbd_img_obj_exists_submit(obj_request);
}
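/*
 * Illustrative sketch (not built): the routing rule implemented above,
 * restated as a standalone predicate.  The helper name is hypothetical.
 */
#if 0
static bool example_is_simple_request(struct rbd_img_request *img_request,
                                      struct rbd_obj_request *obj_request)
{
        struct rbd_device *rbd_dev = img_request->rbd_dev;

        return !img_request_write_test(img_request) ||   /* any read */
               !img_request_layered_test(img_request) || /* no parent */
               rbd_dev->parent_overlap <= obj_request->img_offset ||
               (obj_request_known_test(obj_request) &&
                obj_request_exists_test(obj_request));
}
#endif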
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        dout("%s: img %p\n", __func__, img_request);
        for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
                int ret;

                ret = rbd_img_obj_request_submit(obj_request);
                if (ret)
                        return ret;
        }

        return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
        struct rbd_obj_request *obj_request;
        struct rbd_device *rbd_dev;
        u64 obj_end;
        u64 img_xferred;
        int img_result;

        rbd_assert(img_request_child_test(img_request));

        /* First get what we need from the image request and release it */

        obj_request = img_request->obj_request;
        img_xferred = img_request->xferred;
        img_result = img_request->result;
        rbd_img_request_put(img_request);

        /*
         * If the overlap has become 0 (most likely because the
         * image has been flattened) we need to re-submit the
         * original request.
         */
        rbd_assert(obj_request);
        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;
        if (!rbd_dev->parent_overlap) {
                struct ceph_osd_client *osdc;

                osdc = &rbd_dev->rbd_client->client->osdc;
                img_result = rbd_obj_request_submit(osdc, obj_request);
                if (!img_result)
                        return;
        }

        obj_request->result = img_result;
        if (obj_request->result)
                goto out;

        /*
         * We need to zero anything beyond the parent overlap
         * boundary.  Since rbd_img_obj_request_read_callback()
         * will zero anything beyond the end of a short read, an
         * easy way to do this is to pretend the data from the
         * parent came up short--ending at the overlap boundary.
         */
        rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
        obj_end = obj_request->img_offset + obj_request->length;
        if (obj_end > rbd_dev->parent_overlap) {
                u64 xferred = 0;

                if (obj_request->img_offset < rbd_dev->parent_overlap)
                        xferred = rbd_dev->parent_overlap -
                                        obj_request->img_offset;

                obj_request->xferred = min(img_xferred, xferred);
        } else {
                obj_request->xferred = img_xferred;
        }
out:
        rbd_img_obj_request_read_callback(obj_request);
        rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        int result;

        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request != NULL);
        rbd_assert(obj_request->result == (s32) -ENOENT);
        rbd_assert(obj_request_type_valid(obj_request->type));

        /* rbd_read_finish(obj_request, obj_request->length); */
        img_request = rbd_parent_request_create(obj_request,
                                                obj_request->img_offset,
                                                obj_request->length);
        result = -ENOMEM;
        if (!img_request)
                goto out_err;

        if (obj_request->type == OBJ_REQUEST_BIO)
                result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
                                                obj_request->bio_list);
        else
                result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
                                                obj_request->pages);
        if (result)
                goto out_err;

        img_request->callback = rbd_img_parent_read_callback;
        result = rbd_img_request_submit(img_request);
        if (result)
                goto out_err;

        return;
out_err:
        if (img_request)
                rbd_img_request_put(img_request);
        obj_request->result = result;
        obj_request->xferred = 0;
        obj_request_done_set(obj_request);
}
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
        struct rbd_obj_request *obj_request;
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        int ret;

        obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
                                                        OBJ_REQUEST_NODATA);
        if (!obj_request)
                return -ENOMEM;

        ret = -ENOMEM;
        obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                                                  obj_request);
        if (!obj_request->osd_req)
                goto out;

        osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
                                        notify_id, 0, 0);
        rbd_osd_req_format_read(obj_request);

        ret = rbd_obj_request_submit(osdc, obj_request);
        if (ret)
                goto out;
        ret = rbd_obj_request_wait(obj_request);
out:
        rbd_obj_request_put(obj_request);

        return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
        struct rbd_device *rbd_dev = (struct rbd_device *)data;
        int ret;

        if (!rbd_dev)
                return;

        dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
                rbd_dev->header_name, (unsigned long long)notify_id,
                (unsigned int)opcode);
        ret = rbd_dev_refresh(rbd_dev);
        if (ret)
                rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);

        rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
/*
 * Request sync osd watch/unwatch.  The value of "start" determines
 * whether a watch request is being initiated or torn down.
 */
static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
        int ret;

        rbd_assert(start ^ !!rbd_dev->watch_event);
        rbd_assert(start ^ !!rbd_dev->watch_request);

        if (start) {
                ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
                                                &rbd_dev->watch_event);
                if (ret < 0)
                        return ret;
                rbd_assert(rbd_dev->watch_event != NULL);
        }

        ret = -ENOMEM;
        obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
                                                        OBJ_REQUEST_NODATA);
        if (!obj_request)
                goto out_cancel;

        obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
                                                  obj_request);
        if (!obj_request->osd_req)
                goto out_cancel;

        if (start)
                ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
        else
                ceph_osdc_unregister_linger_request(osdc,
                                        rbd_dev->watch_request->osd_req);

        osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
                                rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
        rbd_osd_req_format_write(obj_request);

        ret = rbd_obj_request_submit(osdc, obj_request);
        if (ret)
                goto out_cancel;
        ret = rbd_obj_request_wait(obj_request);
        if (ret)
                goto out_cancel;
        ret = obj_request->result;
        if (ret)
                goto out_cancel;

        /*
         * A watch request is set to linger, so the underlying osd
         * request won't go away until we unregister it.  We retain
         * a pointer to the object request during that time (in
         * rbd_dev->watch_request), so we'll keep a reference to
         * it.  We'll drop that reference (below) after we've
         * unregistered it.
         */
        if (start) {
                rbd_dev->watch_request = obj_request;

                return 0;
        }

        /* We have successfully torn down the watch request */

        rbd_obj_request_put(rbd_dev->watch_request);
        rbd_dev->watch_request = NULL;
out_cancel:
        /* Cancel the event if we're tearing down, or on error */
        ceph_osdc_cancel_event(rbd_dev->watch_event);
        rbd_dev->watch_event = NULL;
        if (obj_request)
                rbd_obj_request_put(obj_request);

        return ret;
}
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
        return __rbd_dev_header_watch_sync(rbd_dev, true);
}

static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
        int ret;

        ret = __rbd_dev_header_watch_sync(rbd_dev, false);
        if (ret)
                rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
                         ret);
}
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the outbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
                             const char *object_name,
                             const char *class_name,
                             const char *method_name,
                             const void *outbound,
                             size_t outbound_size,
                             void *inbound,
                             size_t inbound_size)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
        struct page **pages;
        u32 page_count;
        int ret;

        /*
         * Method calls are ultimately read operations.  The result
         * should placed into the inbound buffer provided.  They
         * also supply outbound data--parameters for the object
         * method.  Currently if this is present it will be a
         * snapshot id.
         */
        page_count = (u32)calc_pages_for(0, inbound_size);
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ret = -ENOMEM;
        obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
                                                        OBJ_REQUEST_PAGES);
        if (!obj_request)
                goto out;

        obj_request->pages = pages;
        obj_request->page_count = page_count;

        obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                                                  obj_request);
        if (!obj_request->osd_req)
                goto out;

        osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
                                        class_name, method_name);
        if (outbound_size) {
                struct ceph_pagelist *pagelist;

                pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
                if (!pagelist)
                        goto out;

                ceph_pagelist_init(pagelist);
                ceph_pagelist_append(pagelist, outbound, outbound_size);
                osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
                                                pagelist);
        }
        osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
                                        obj_request->pages, inbound_size,
                                        0, false, false);
        rbd_osd_req_format_read(obj_request);

        ret = rbd_obj_request_submit(osdc, obj_request);
        if (ret)
                goto out;
        ret = rbd_obj_request_wait(obj_request);
        if (ret)
                goto out;

        ret = obj_request->result;
        if (ret < 0)
                goto out;

        rbd_assert(obj_request->xferred < (u64)INT_MAX);
        ret = (int)obj_request->xferred;
        ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
        if (obj_request)
                rbd_obj_request_put(obj_request);
        else
                ceph_release_page_vector(pages, page_count);

        return ret;
}
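/*
 * Illustrative sketch (not built): a typical rbd_obj_method_sync()
 * invocation, modeled on the "get_size" call made later in this file.
 * The helper name and reply struct here are hypothetical shorthand.
 */
#if 0
static int example_get_size(struct rbd_device *rbd_dev, u64 *size)
{
        __le64 snapid = cpu_to_le64(CEPH_NOSNAP);
        struct {
                u8 order;
                __le64 size;
        } __attribute__ ((packed)) reply = { 0 };
        int ret;

        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_size",
                                &snapid, sizeof (snapid),
                                &reply, sizeof (reply));
        if (ret < 0)
                return ret;
        if (ret < sizeof (reply))
                return -ERANGE; /* short reply */

        *size = le64_to_cpu(reply.size);

        return 0;
}
#endif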
static void rbd_request_fn(struct request_queue *q)
                __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct rbd_device *rbd_dev = q->queuedata;
        bool read_only = rbd_dev->mapping.read_only;
        struct request *rq;
        int result;

        while ((rq = blk_fetch_request(q))) {
                bool write_request = rq_data_dir(rq) == WRITE;
                struct rbd_img_request *img_request;
                u64 offset;
                u64 length;

                /* Ignore any non-FS requests that filter through. */

                if (rq->cmd_type != REQ_TYPE_FS) {
                        dout("%s: non-fs request type %d\n", __func__,
                                (int) rq->cmd_type);
                        __blk_end_request_all(rq, 0);
                        continue;
                }

                /* Ignore/skip any zero-length requests */

                offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
                length = (u64) blk_rq_bytes(rq);

                if (!length) {
                        dout("%s: zero-length request\n", __func__);
                        __blk_end_request_all(rq, 0);
                        continue;
                }

                spin_unlock_irq(q->queue_lock);

                /* Disallow writes to a read-only device */

                if (write_request) {
                        result = -EROFS;
                        if (read_only)
                                goto end_request;
                        rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
                }

                /*
                 * Quit early if the mapped snapshot no longer
                 * exists.  It's still possible the snapshot will
                 * have disappeared by the time our request arrives
                 * at the osd, but there's no sense in sending it if
                 * we already know.
                 */
                if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
                        dout("request for non-existent snapshot");
                        rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
                        result = -ENXIO;
                        goto end_request;
                }

                result = -EINVAL;
                if (offset && length > U64_MAX - offset + 1) {
                        rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
                                offset, length);
                        goto end_request;       /* Shouldn't happen */
                }

                result = -EIO;
                if (offset + length > rbd_dev->mapping.size) {
                        rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
                                offset, length, rbd_dev->mapping.size);
                        goto end_request;
                }

                result = -ENOMEM;
                img_request = rbd_img_request_create(rbd_dev, offset, length,
                                                        write_request);
                if (!img_request)
                        goto end_request;

                img_request->rq = rq;

                result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
                                                rq->bio);
                if (!result)
                        result = rbd_img_request_submit(img_request);
                if (result)
                        rbd_img_request_put(img_request);
end_request:
                spin_lock_irq(q->queue_lock);
                if (result < 0) {
                        rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
                                write_request ? "write" : "read",
                                length, offset, result);

                        __blk_end_request_all(rq, result);
                }
        }
}
/*
 * a queue callback. Makes sure that we don't create a bio that spans across
 * multiple osd objects. One exception would be with a single page bios,
 * which we handle later at bio_chain_clone_range()
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
                          struct bio_vec *bvec)
{
        struct rbd_device *rbd_dev = q->queuedata;
        sector_t sector_offset;
        sector_t sectors_per_obj;
        sector_t obj_sector_offset;
        int ret;

        /*
         * Find how far into its rbd object the partition-relative
         * bio start sector is to offset relative to the enclosing
         * device.
         */
        sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
        sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
        obj_sector_offset = sector_offset & (sectors_per_obj - 1);

        /*
         * Compute the number of bytes from that offset to the end
         * of the object.  Account for what's already used by the bio.
         */
        ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
        if (ret > bmd->bi_size)
                ret -= bmd->bi_size;
        else
                ret = 0;

        /*
         * Don't send back more than was asked for.  And if the bio
         * was empty, let the whole thing through because:  "Note
         * that a block device *must* allow a single page to be
         * added to an empty bio."
         */
        rbd_assert(bvec->bv_len <= PAGE_SIZE);
        if (ret > (int) bvec->bv_len || !bmd->bi_size)
                ret = (int) bvec->bv_len;

        return ret;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
        struct gendisk *disk = rbd_dev->disk;

        if (!disk)
                return;

        rbd_dev->disk = NULL;
        if (disk->flags & GENHD_FL_UP) {
                del_gendisk(disk);
                if (disk->queue)
                        blk_cleanup_queue(disk->queue);
        }
        put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
                                const char *object_name,
                                u64 offset, u64 length, void *buf)

{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_obj_request *obj_request;
        struct page **pages = NULL;
        u32 page_count;
        size_t size;
        int ret;

        page_count = (u32) calc_pages_for(offset, length);
        pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ret = -ENOMEM;
        obj_request = rbd_obj_request_create(object_name, offset, length,
                                                        OBJ_REQUEST_PAGES);
        if (!obj_request)
                goto out;

        obj_request->pages = pages;
        obj_request->page_count = page_count;

        obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
                                                  obj_request);
        if (!obj_request->osd_req)
                goto out;

        osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
                                        offset, length, 0, 0);
        osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
                                        obj_request->pages,
                                        obj_request->length,
                                        obj_request->offset & ~PAGE_MASK,
                                        false, false);
        rbd_osd_req_format_read(obj_request);

        ret = rbd_obj_request_submit(osdc, obj_request);
        if (ret)
                goto out;
        ret = rbd_obj_request_wait(obj_request);
        if (ret)
                goto out;

        ret = obj_request->result;
        if (ret < 0)
                goto out;

        rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
        size = (size_t) obj_request->xferred;
        ceph_copy_from_page_vector(pages, buf, 0, size);
        rbd_assert(size <= (size_t)INT_MAX);
        ret = (int)size;
out:
        if (obj_request)
                rbd_obj_request_put(obj_request);
        else
                ceph_release_page_vector(pages, page_count);

        return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
        struct rbd_image_header_ondisk *ondisk = NULL;
        u32 snap_count = 0;
        u64 names_size = 0;
        u32 want_count;
        int ret;

        /*
         * The complete header will include an array of its 64-bit
         * snapshot ids, followed by the names of those snapshots as
         * a contiguous block of NUL-terminated strings.  Note that
         * the number of snapshots could change by the time we read
         * it in, in which case we re-read it.
         */
        do {
                size_t size;

                kfree(ondisk);

                size = sizeof (*ondisk);
                size += snap_count * sizeof (struct rbd_image_snap_ondisk);
                size += names_size;
                ondisk = kmalloc(size, GFP_KERNEL);
                if (!ondisk)
                        return -ENOMEM;

                ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
                                        0, size, ondisk);
                if (ret < 0)
                        goto out;
                if ((size_t)ret < size) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "short header read (want %zd got %d)",
                                size, ret);
                        goto out;
                }
                if (!rbd_dev_ondisk_valid(ondisk)) {
                        ret = -ENXIO;
                        rbd_warn(rbd_dev, "invalid header");
                        goto out;
                }

                names_size = le64_to_cpu(ondisk->snap_names_len);
                want_count = snap_count;
                snap_count = le32_to_cpu(ondisk->snap_count);
        } while (snap_count != want_count);

        ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
        kfree(ondisk);

        return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
        u64 snap_id;

        if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
                return;

        snap_id = rbd_dev->spec->snap_id;
        if (snap_id == CEPH_NOSNAP)
                return;

        if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
                clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
        sector_t size;
        bool removing;

        /*
         * Don't hold the lock while doing disk operations,
         * or lock ordering will conflict with the bdev mutex via:
         * rbd_add() -> blkdev_get() -> rbd_open()
         */
        spin_lock_irq(&rbd_dev->lock);
        removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
        spin_unlock_irq(&rbd_dev->lock);
        /*
         * If the device is being removed, rbd_dev->disk has
         * been destroyed, so don't try to update its size
         */
        if (!removing) {
                size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
                dout("setting size to %llu sectors", (unsigned long long)size);
                set_capacity(rbd_dev->disk, size);
                revalidate_disk(rbd_dev->disk);
        }
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
        u64 mapping_size;
        int ret;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        down_write(&rbd_dev->header_rwsem);
        mapping_size = rbd_dev->mapping.size;
        if (rbd_dev->image_format == 1)
                ret = rbd_dev_v1_header_info(rbd_dev);
        else
                ret = rbd_dev_v2_header_info(rbd_dev);

        /* If it's a mapped snapshot, validate its EXISTS flag */

        rbd_exists_validate(rbd_dev);
        up_write(&rbd_dev->header_rwsem);

        if (mapping_size != rbd_dev->mapping.size)
                rbd_dev_update_size(rbd_dev);

        return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
        struct gendisk *disk;
        struct request_queue *q;
        u64 segment_size;

        /* create gendisk info */
        disk = alloc_disk(single_major ?
                          (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
                          RBD_MINORS_PER_MAJOR);
        if (!disk)
                return -ENOMEM;

        snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
                 rbd_dev->dev_id);
        disk->major = rbd_dev->major;
        disk->first_minor = rbd_dev->minor;
        if (single_major)
                disk->flags |= GENHD_FL_EXT_DEVT;
        disk->fops = &rbd_bd_ops;
        disk->private_data = rbd_dev;

        q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
        if (!q)
                goto out_disk;

        /* We use the default size, but let's be explicit about it. */
        blk_queue_physical_block_size(q, SECTOR_SIZE);

        /* set io sizes to object size */
        segment_size = rbd_obj_bytes(&rbd_dev->header);
        blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
        blk_queue_max_segment_size(q, segment_size);
        blk_queue_io_min(q, segment_size);
        blk_queue_io_opt(q, segment_size);

        blk_queue_merge_bvec(q, rbd_merge_bvec);
        disk->queue = q;

        q->queuedata = rbd_dev;

        rbd_dev->disk = disk;

        return 0;
out_disk:
        put_disk(disk);

        return -ENOMEM;
}
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
        return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "%llu\n",
                (unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "0x%016llx\n",
                (unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        if (rbd_dev->major)
                return sprintf(buf, "%d\n", rbd_dev->major);

        return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_id_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "client%lld\n",
                        ceph_client_id(rbd_dev->rbd_client->client));
}

static ssize_t rbd_pool_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "%llu\n",
                        (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        if (rbd_dev->spec->image_name)
                return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

        return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

/*
 * For an rbd v2 image, shows the pool id, image id, and snapshot id
 * for the parent image.  If there is no parent, simply shows
 * "(no parent image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
                             struct device_attribute *attr,
                             char *buf)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
        struct rbd_spec *spec = rbd_dev->parent_spec;
        int count;
        char *bufp = buf;

        if (!spec)
                return sprintf(buf, "(no parent image)\n");

        count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
                        (unsigned long long) spec->pool_id, spec->pool_name);
        if (count < 0)
                return count;
        bufp += count;

        count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
                        spec->image_name ? spec->image_name : "(unknown)");
        if (count < 0)
                return count;
        bufp += count;

        count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
                        (unsigned long long) spec->snap_id, spec->snap_name);
        if (count < 0)
                return count;
        bufp += count;

        count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
        if (count < 0)
                return count;
        bufp += count;

        return (ssize_t) (bufp - buf);
}

static ssize_t rbd_image_refresh(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t size)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
        int ret;

        ret = rbd_dev_refresh(rbd_dev);
        if (ret)
                rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);

        return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
        &dev_attr_size.attr,
        &dev_attr_features.attr,
        &dev_attr_major.attr,
        &dev_attr_minor.attr,
        &dev_attr_client_id.attr,
        &dev_attr_pool.attr,
        &dev_attr_pool_id.attr,
        &dev_attr_name.attr,
        &dev_attr_image_id.attr,
        &dev_attr_current_snap.attr,
        &dev_attr_parent.attr,
        &dev_attr_refresh.attr,
        NULL
};

static struct attribute_group rbd_attr_group = {
        .attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
        &rbd_attr_group,
        NULL
};

static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
        .name           = "rbd",
        .groups         = rbd_attr_groups,
        .release        = rbd_sysfs_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
        kref_get(&spec->kref);

        return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
        if (spec)
                kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
        struct rbd_spec *spec;

        spec = kzalloc(sizeof (*spec), GFP_KERNEL);
        if (!spec)
                return NULL;
        kref_init(&spec->kref);

        return spec;
}

static void rbd_spec_free(struct kref *kref)
{
        struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

        kfree(spec->pool_name);
        kfree(spec->image_id);
        kfree(spec->image_name);
        kfree(spec->snap_name);
        kfree(spec);
}
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                                         struct rbd_spec *spec)
{
        struct rbd_device *rbd_dev;

        rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
        if (!rbd_dev)
                return NULL;

        spin_lock_init(&rbd_dev->lock);
        rbd_dev->flags = 0;
        atomic_set(&rbd_dev->parent_ref, 0);
        INIT_LIST_HEAD(&rbd_dev->node);
        init_rwsem(&rbd_dev->header_rwsem);

        rbd_dev->spec = spec;
        rbd_dev->rbd_client = rbdc;

        /* Initialize the layout used for all rbd requests */

        rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
        rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
        rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
        rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

        return rbd_dev;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
        rbd_put_client(rbd_dev->rbd_client);
        rbd_spec_put(rbd_dev->spec);
        kfree(rbd_dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size)
{
        __le64 snapid = cpu_to_le64(snap_id);
        int ret;
        struct {
                u8 order;
                __le64 size;
        } __attribute__ ((packed)) size_buf = { 0 };

        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_size",
                                &snapid, sizeof (snapid),
                                &size_buf, sizeof (size_buf));
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
        if (ret < sizeof (size_buf))
                return -ERANGE;

        if (order) {
                *order = size_buf.order;
                dout(" order %u", (unsigned int)*order);
        }
        *snap_size = le64_to_cpu(size_buf.size);

        dout(" snap_id 0x%016llx snap_size = %llu\n",
                (unsigned long long)snap_id,
                (unsigned long long)*snap_size);

        return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
        return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
                                        &rbd_dev->header.obj_order,
                                        &rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
        void *reply_buf;
        int ret;
        void *p;

        reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
        if (!reply_buf)
                return -ENOMEM;

        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_object_prefix", NULL, 0,
                                reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;

        p = reply_buf;
        rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
                                                p + ret, NULL, GFP_NOIO);
        ret = 0;

        if (IS_ERR(rbd_dev->header.object_prefix)) {
                ret = PTR_ERR(rbd_dev->header.object_prefix);
                rbd_dev->header.object_prefix = NULL;
        } else {
                dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
        }
out:
        kfree(reply_buf);

        return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features)
{
        __le64 snapid = cpu_to_le64(snap_id);
        struct {
                __le64 features;
                __le64 incompat;
        } __attribute__ ((packed)) features_buf = { 0 };
        u64 incompat;
        int ret;

        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_features",
                                &snapid, sizeof (snapid),
                                &features_buf, sizeof (features_buf));
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
        if (ret < sizeof (features_buf))
                return -ERANGE;

        incompat = le64_to_cpu(features_buf.incompat);
        if (incompat & ~RBD_FEATURES_SUPPORTED)
                return -ENXIO;

        *snap_features = le64_to_cpu(features_buf.features);

        dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
                (unsigned long long)snap_id,
                (unsigned long long)*snap_features,
                (unsigned long long)le64_to_cpu(features_buf.incompat));

        return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
        return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
                                                &rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
        struct rbd_spec *parent_spec;
        size_t size;
        void *reply_buf = NULL;
        __le64 snapid;
        void *p;
        void *end;
        u64 pool_id;
        char *image_id;
        u64 snap_id;
        u64 overlap;
        int ret;

        parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;

        size = sizeof (__le64) +                                /* pool_id */
                sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
                sizeof (__le64) +                               /* snap_id */
                sizeof (__le64);                                /* overlap */
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf) {
                ret = -ENOMEM;
                goto out_err;
        }

        snapid = cpu_to_le64(CEPH_NOSNAP);
        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_parent",
                                &snapid, sizeof (snapid),
                                reply_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out_err;

        p = reply_buf;
        end = reply_buf + ret;
        ret = -ERANGE;
        ceph_decode_64_safe(&p, end, pool_id, out_err);
        if (pool_id == CEPH_NOPOOL) {
                /*
                 * Either the parent never existed, or we have
                 * record of it but the image got flattened so it no
                 * longer has a parent.  When the parent of a
                 * layered image disappears we immediately set the
                 * overlap to 0.  The effect of this is that all new
                 * requests will be treated as if the image had no
                 * parent.
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
                        smp_mb();
                        rbd_dev_parent_put(rbd_dev);
                        pr_info("%s: clone image has been flattened\n",
                                rbd_dev->disk->disk_name);
                }

                goto out;       /* No parent?  No problem. */
        }

        /* The ceph file layout needs to fit pool id in 32 bits */

        ret = -EIO;
        if (pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
                        (unsigned long long)pool_id, U32_MAX);
                goto out_err;
        }

        image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(image_id)) {
                ret = PTR_ERR(image_id);
                goto out_err;
        }
        ceph_decode_64_safe(&p, end, snap_id, out_err);
        ceph_decode_64_safe(&p, end, overlap, out_err);

        /*
         * The parent won't change (except when the clone is
         * flattened, already handled that).  So we only need to
         * record the parent spec we have not already done so.
         */
        if (!rbd_dev->parent_spec) {
                parent_spec->pool_id = pool_id;
                parent_spec->image_id = image_id;
                parent_spec->snap_id = snap_id;
                rbd_dev->parent_spec = parent_spec;
                parent_spec = NULL;     /* rbd_dev now owns this */
        }

        /*
         * We always update the parent overlap.  If it's zero we
         * treat it specially.
         */
        rbd_dev->parent_overlap = overlap;
        smp_mb();
        if (!overlap) {

                /* A null parent_spec indicates it's the initial probe */

                if (parent_spec) {
                        /*
                         * The overlap has become zero, so the clone
                         * must have been resized down to 0 at some
                         * point.  Treat this the same as a flatten.
                         */
                        rbd_dev_parent_put(rbd_dev);
                        pr_info("%s: clone image now standalone\n",
                                rbd_dev->disk->disk_name);
                } else {
                        /*
                         * For the initial probe, if we find the
                         * overlap is zero we just pretend there was
                         * no parent image.
                         */
                        rbd_warn(rbd_dev, "ignoring parent of "
                                                "clone with overlap 0\n");
                }
        }
out:
        ret = 0;
out_err:
        kfree(reply_buf);
        rbd_spec_put(parent_spec);

        return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
        struct {
                __le64 stripe_unit;
                __le64 stripe_count;
        } __attribute__ ((packed)) striping_info_buf = { 0 };
        size_t size = sizeof (striping_info_buf);
        void *p;
        u64 obj_size;
        u64 stripe_unit;
        u64 stripe_count;
        int ret;

        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_stripe_unit_count", NULL, 0,
                                (char *)&striping_info_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                return ret;
        if (ret < size)
                return -ERANGE;

        /*
         * We don't actually support the "fancy striping" feature
         * (STRIPINGV2) yet, but if the striping sizes are the
         * defaults the behavior is the same as before.  So find
         * out, and only fail if the image has non-default values.
         */
        ret = -EINVAL;
        obj_size = (u64)1 << rbd_dev->header.obj_order;
        p = &striping_info_buf;
        stripe_unit = ceph_decode_64(&p);
        if (stripe_unit != obj_size) {
                rbd_warn(rbd_dev, "unsupported stripe unit "
                                "(got %llu want %llu)",
                                stripe_unit, obj_size);
                return -EINVAL;
        }
        stripe_count = ceph_decode_64(&p);
        if (stripe_count != 1) {
                rbd_warn(rbd_dev, "unsupported stripe count "
                                "(got %llu want 1)", stripe_count);
                return -EINVAL;
        }
        rbd_dev->header.stripe_unit = stripe_unit;
        rbd_dev->header.stripe_count = stripe_count;

        return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
        size_t image_id_size;
        char *image_id;
        void *p;
        void *end;
        size_t size;
        void *reply_buf = NULL;
        size_t len = 0;
        char *image_name = NULL;
        int ret;

        rbd_assert(!rbd_dev->spec->image_name);

        len = strlen(rbd_dev->spec->image_id);
        image_id_size = sizeof (__le32) + len;
        image_id = kmalloc(image_id_size, GFP_KERNEL);
        if (!image_id)
                return NULL;

        p = image_id;
        end = image_id + image_id_size;
        ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

        size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                goto out;

        ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
                                "rbd", "dir_get_name",
                                image_id, image_id_size,
                                reply_buf, size);
        if (ret < 0)
                goto out;
        p = reply_buf;
        end = reply_buf + ret;

        image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
        if (IS_ERR(image_name))
                image_name = NULL;
        else
                dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
        kfree(reply_buf);
        kfree(image_id);

        return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        const char *snap_name;
        u32 which = 0;

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which < snapc->num_snaps) {
                if (!strcmp(name, snap_name))
                        return snapc->snaps[which];
                snap_name += strlen(snap_name) + 1;
                which++;
        }
        return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 snap_id;
        bool found = false;
        u32 which;

        for (which = 0; !found && which < snapc->num_snaps; which++) {
                const char *snap_name;

                snap_id = snapc->snaps[which];
                snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
                if (IS_ERR(snap_name)) {
                        /* ignore no-longer existing snapshots */
                        if (PTR_ERR(snap_name) == -ENOENT)
                                continue;
                        else
                                break;
                }
                found = !strcmp(name, snap_name);
                kfree(snap_name);
        }
        return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
        if (rbd_dev->image_format == 1)
                return rbd_v1_snap_id_by_name(rbd_dev, name);

        return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * When an rbd image has a parent image, it is identified by the
 * pool, image, and snapshot ids (not names).  This function fills
 * in the names for those ids.  (It's OK if we can't figure out the
 * name for an image id, but the pool and snapshot ids should always
 * exist and have names.)  All names in an rbd spec are dynamically
 * allocated.
 *
 * When an image being mapped (not a parent) is probed, we have the
 * pool name and pool id, image name and image id, and the snapshot
 * name.  The only thing we're missing is the snapshot id.
 */
static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        struct rbd_spec *spec = rbd_dev->spec;
        const char *pool_name;
        const char *image_name;
        const char *snap_name;
        int ret;

        /*
         * An image being mapped will have the pool name (etc.), but
         * we need to look up the snapshot id.
         */
        if (spec->pool_name) {
                if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
                        u64 snap_id;

                        snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
                        if (snap_id == CEPH_NOSNAP)
                                return -ENOENT;
                        spec->snap_id = snap_id;
                } else {
                        spec->snap_id = CEPH_NOSNAP;
                }

                return 0;
        }

        /* Get the pool name; we have to make our own copy of this */

        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
        if (!pool_name) {
                rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
                return -EIO;
        }
        pool_name = kstrdup(pool_name, GFP_KERNEL);
        if (!pool_name)
                return -ENOMEM;

        /* Fetch the image name; tolerate failure here */

        image_name = rbd_dev_image_name(rbd_dev);
        if (!image_name)
                rbd_warn(rbd_dev, "unable to get image name");

        /* Look up the snapshot name, and make a copy */

        snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
        if (IS_ERR(snap_name)) {
                ret = PTR_ERR(snap_name);
                goto out_err;
        }

        spec->pool_name = pool_name;
        spec->image_name = image_name;
        spec->snap_name = snap_name;

        return 0;
out_err:
        kfree(image_name);
        kfree(pool_name);

        return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
        size_t size;
        int ret;
        void *reply_buf;
        void *p;
        void *end;
        u64 seq;
        u32 snap_count;
        struct ceph_snap_context *snapc;
        u32 i;

        /*
         * We'll need room for the seq value (maximum snapshot id),
         * snapshot count, and array of that many snapshot ids.
         * For now we have a fixed upper limit on the number we're
         * prepared to receive.
         */
        size = sizeof (__le64) + sizeof (__le32) +
                        RBD_MAX_SNAP_COUNT * sizeof (__le64);
        reply_buf = kzalloc(size, GFP_KERNEL);
        if (!reply_buf)
                return -ENOMEM;

        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_snapcontext", NULL, 0,
                                reply_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0)
                goto out;

        p = reply_buf;
        end = reply_buf + ret;
        ret = -ERANGE;
        ceph_decode_64_safe(&p, end, seq, out);
        ceph_decode_32_safe(&p, end, snap_count, out);

        /*
         * Make sure the reported number of snapshot ids wouldn't go
         * beyond the end of our buffer.  But before checking that,
         * make sure the computed size of the snapshot context we
         * allocate is representable in a size_t.
         */
        if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
                                 / sizeof (u64)) {
                ret = -EINVAL;
                goto out;
        }
        if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
                goto out;
        ret = 0;

        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc) {
                ret = -ENOMEM;
                goto out;
        }
        snapc->seq = seq;
        for (i = 0; i < snap_count; i++)
                snapc->snaps[i] = ceph_decode_64(&p);

        ceph_put_snap_context(rbd_dev->header.snapc);
        rbd_dev->header.snapc = snapc;

        dout(" snap context seq = %llu, snap_count = %u\n",
                (unsigned long long)seq, (unsigned int)snap_count);
out:
        kfree(reply_buf);

        return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        size_t size;
        void *reply_buf;
        __le64 snapid;
        int ret;
        void *p;
        void *end;
        char *snap_name;

        size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
        reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                return ERR_PTR(-ENOMEM);

        snapid = cpu_to_le64(snap_id);
        ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
                                "rbd", "get_snapshot_name",
                                &snapid, sizeof (snapid),
                                reply_buf, size);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret < 0) {
                snap_name = ERR_PTR(ret);
                goto out;
        }

        p = reply_buf;
        end = reply_buf + ret;
        snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
        if (IS_ERR(snap_name))
                goto out;

        dout(" snap_id 0x%016llx snap_name = %s\n",
                (unsigned long long)snap_id, snap_name);
out:
        kfree(reply_buf);

        return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
        bool first_time = rbd_dev->header.object_prefix == NULL;
        int ret;

        ret = rbd_dev_v2_image_size(rbd_dev);
        if (ret)
                return ret;

        if (first_time) {
                ret = rbd_dev_v2_header_onetime(rbd_dev);
                if (ret)
                        return ret;
        }

        /*
         * If the image supports layering, get the parent info.  We
         * need to probe the first time regardless.  Thereafter we
         * only need to if there's a parent, to see if it has
         * disappeared due to the mapped image getting flattened.
         */
        if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
                        (first_time || rbd_dev->parent_spec)) {
                bool warn;

                ret = rbd_dev_v2_parent_info(rbd_dev);
                if (ret)
                        return ret;

                /*
                 * Print a warning if this is the initial probe and
                 * the image has a parent.  Don't print it if the
                 * image now being probed is itself a parent.  We
                 * can tell at this point because we won't know its
                 * pool name yet (just its pool id).
                 */
                warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
                if (first_time && warn)
                        rbd_warn(rbd_dev, "WARNING: kernel layering "
                                        "is EXPERIMENTAL!");
        }

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
                if (rbd_dev->mapping.size != rbd_dev->header.image_size)
                        rbd_dev->mapping.size = rbd_dev->header.image_size;

        ret = rbd_dev_v2_snap_context(rbd_dev);
        dout("rbd_dev_v2_snap_context returned %d\n", ret);

        return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
        struct device *dev;
        int ret;

        dev = &rbd_dev->dev;
        dev->bus = &rbd_bus_type;
        dev->type = &rbd_device_type;
        dev->parent = &rbd_root_dev;
        dev->release = rbd_dev_device_release;
        dev_set_name(dev, "%d", rbd_dev->dev_id);
        ret = device_register(dev);

        return ret;
}

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
        device_unregister(&rbd_dev->dev);
}
/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
        int new_dev_id;

        new_dev_id = ida_simple_get(&rbd_dev_id_ida,
                                    0, minor_to_rbd_dev_id(1 << MINORBITS),
                                    GFP_KERNEL);
        if (new_dev_id < 0)
                return new_dev_id;

        rbd_dev->dev_id = new_dev_id;

        spin_lock(&rbd_dev_list_lock);
        list_add_tail(&rbd_dev->node, &rbd_dev_list);
        spin_unlock(&rbd_dev_list_lock);

        dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

        return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
        spin_lock(&rbd_dev_list_lock);
        list_del_init(&rbd_dev->node);
        spin_unlock(&rbd_dev_list_lock);

        ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

        dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
4457 * Skips over white space at *buf, and updates *buf to point to the
4458 * first found non-space character (if any). Returns the length of
4459 * the token (string of non-white space characters) found. Note
4460 * that *buf must be terminated with '\0'.
4462 static inline size_t next_token(const char **buf
)
4465 * These are the characters that produce nonzero for
4466 * isspace() in the "C" and "POSIX" locales.
4468 const char *spaces
= " \f\n\r\t\v";
4470 *buf
+= strspn(*buf
, spaces
); /* Find start of token */
4472 return strcspn(*buf
, spaces
); /* Return token length */
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
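/*
 * Example of the truncation rule for copy_token() (hypothetical
 * sizes): with *buf at "image" and token_size of 4, the return
 * value is 5, which is >= token_size, so nothing is copied into
 * token -- but *buf is still advanced past "image".
 */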
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	  I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	An optional snapshot name.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
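/*
 * Example of input accepted by rbd_add_parse_args() above (monitor
 * address, user name, pool, image and snapshot are all hypothetical
 * placeholders):
 *
 *   $ echo "192.168.0.1:6789 name=admin rbd foo snap1" \
 *       > /sys/bus/rbd/add
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the authoritative
 * description of this interface.
 */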
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
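/*
 * Illustration (assuming RBD_ID_PREFIX from rbd_types.h is
 * "rbd_id."): for an image named "foo", rbd_dev_image_id() looks
 * for an object named "rbd_id.foo".  If the object exists its
 * contents become the image id (format 2); if it does not, the
 * image is assumed to be format 1 and gets an empty image id.
 */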
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
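/*
 * On success the mapped image shows up as an ordinary block device;
 * for example (hypothetical id), a device that was assigned dev_id
 * 0 is named rbd0 and appears as /dev/rbd0, with any partitions as
 * /dev/rbd0p1, /dev/rbd0p2, and so on.
 */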
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
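/*
 * Illustration (assuming RBD_SUFFIX ".rbd" and RBD_HEADER_PREFIX
 * "rbd_header." from rbd_types.h): a format 1 image named "foo"
 * gets header object "foo.rbd", while a format 2 image whose id is
 * "1028ae8944a" gets header object "rbd_header.1028ae8944a".
 */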
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
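/*
 * Walk-through of the loop above (hypothetical chain): if the
 * mapped image is a clone of A, and A is a clone of B, the inner
 * loop stops with first pointing at A's rbd_dev and second at B's
 * (B has no grandparent).  B is released and unlinked from A, then
 * the outer loop repeats, releasing A, until no parents remain.
 */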
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
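/*
 * Example (hypothetical device id): unmapping device id 1 from
 * user space:
 *
 *   $ echo 1 > /sys/bus/rbd/remove
 */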
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();

	return rc;
}
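/*
 * Example (assuming the single_major module parameter declared
 * earlier in this file): loading the driver so all images share
 * one block device major number:
 *
 *   # modprobe rbd single_major=Y
 */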
static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");