rbd: ignore zero-length requests
drivers/block/rbd.c
1 /*
2 rbd.c -- Export ceph rados objects as a Linux block device
3
4
5 based on drivers/block/osdblk.c:
6
7 Copyright 2009 Red Hat, Inc.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21
22
23
24 For usage instructions, please refer to:
25
26 Documentation/ABI/testing/sysfs-bus-rbd
27
28 */
29
30 #include <linux/ceph/libceph.h>
31 #include <linux/ceph/osd_client.h>
32 #include <linux/ceph/mon_client.h>
33 #include <linux/ceph/decode.h>
34 #include <linux/parser.h>
35
36 #include <linux/kernel.h>
37 #include <linux/device.h>
38 #include <linux/module.h>
39 #include <linux/fs.h>
40 #include <linux/blkdev.h>
41
42 #include "rbd_types.h"
43
44 #define RBD_DEBUG /* Activate rbd_assert() calls */
45
46 /*
47 * The basic unit of block I/O is a sector. It is interpreted in a
48 * number of contexts in Linux (blk, bio, genhd), but the default is
49 * universally 512 bytes. These symbols are just slightly more
50 * meaningful than the bare numbers they represent.
51 */
52 #define SECTOR_SHIFT 9
53 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
54
55 /* It might be useful to have these defined elsewhere */
56
57 #define U8_MAX ((u8) (~0U))
58 #define U16_MAX ((u16) (~0U))
59 #define U32_MAX ((u32) (~0U))
60 #define U64_MAX ((u64) (~0ULL))
61
62 #define RBD_DRV_NAME "rbd"
63 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
64
65 #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
66
67 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
68 #define RBD_MAX_SNAP_NAME_LEN \
69 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
70
71 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
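/*
 * Rough arithmetic behind the 510 limit: 510 snapshot ids at 8 bytes
 * each is 4080 bytes, leaving 16 bytes of a 4 KiB page for the snap
 * context's bookkeeping fields (reference count, seq, snap count).
 */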
72
73 #define RBD_SNAP_HEAD_NAME "-"
74
75 /* This allows a single page to hold an image name sent by OSD */
76 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
77 #define RBD_IMAGE_ID_LEN_MAX 64
78
79 #define RBD_OBJ_PREFIX_LEN_MAX 64
80
81 /* Feature bits */
82
83 #define RBD_FEATURE_LAYERING 1
84
85 /* Features supported by this (client software) implementation. */
86
87 #define RBD_FEATURES_ALL (0)
88
89 /*
90 * An RBD device name will be "rbd#", where the "rbd" comes from
91 * RBD_DRV_NAME above, and # is a unique integer identifier.
92 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
93 * enough to hold all possible device names.
94 */
95 #define DEV_NAME_LEN 32
96 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
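/*
 * For example, with a 4-byte int, MAX_INT_FORMAT_WIDTH works out to
 * (5 * 4) / 2 + 1 = 11: each byte needs at most 2.5 decimal digits
 * (log10(256) is about 2.41), plus one character for a sign.  That
 * covers INT_MIN ("-2147483648", 11 characters), and "rbd" plus the
 * digits and a terminating NUL fits easily in DEV_NAME_LEN (32).
 */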
97
98 /*
99 * block device image metadata (in-memory version)
100 */
101 struct rbd_image_header {
102 /* These four fields never change for a given rbd image */
103 char *object_prefix;
104 u64 features;
105 __u8 obj_order;
106 __u8 crypt_type;
107 __u8 comp_type;
108
109 /* The remaining fields need to be updated occasionally */
110 u64 image_size;
111 struct ceph_snap_context *snapc;
112 char *snap_names;
113 u64 *snap_sizes;
114
115 u64 obj_version;
116 };
117
118 /*
119 * An rbd image specification.
120 *
121 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
122 * identify an image. Each rbd_dev structure includes a pointer to
123 * an rbd_spec structure that encapsulates this identity.
124 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
127 * with them are looked up. For a layered image, a parent image is
128 * defined by the tuple, and the names are looked up.
129 *
130 * An rbd_dev structure contains a parent_spec pointer which is
131 * non-null if the image it represents is a child in a layered
132 * image. This pointer will refer to the rbd_spec structure used
133 * by the parent rbd_dev for its own identity (i.e., the structure
134 * is shared between the parent and child).
135 *
136 * Since these structures are populated once, during the discovery
137 * phase of image construction, they are effectively immutable so
138 * we make no effort to synchronize access to them.
139 *
140 * Note that code herein does not assume the image name is known (it
141 * could be a null pointer).
142 */
143 struct rbd_spec {
144 u64 pool_id;
145 char *pool_name;
146
147 char *image_id;
148 char *image_name;
149
150 u64 snap_id;
151 char *snap_name;
152
153 struct kref kref;
154 };
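/*
 * For illustration only, a populated spec for a mapping of pool "rbd",
 * image "foo", snapshot "snap1" might look like this (the id values
 * are hypothetical; format 2 image ids are strings of this flavor):
 *
 *	{ .pool_id  = 2,             .pool_name  = "rbd",
 *	  .image_id = "1002ae8944a", .image_name = "foo",
 *	  .snap_id  = 4,             .snap_name  = "snap1" }
 *
 * Mapping the image head rather than a snapshot would instead use
 * snap_id == CEPH_NOSNAP and snap_name RBD_SNAP_HEAD_NAME ("-").
 */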
155
156 /*
 * An instance of a client; multiple devices may share an rbd client.
158 */
159 struct rbd_client {
160 struct ceph_client *client;
161 struct kref kref;
162 struct list_head node;
163 };
164
165 struct rbd_img_request;
166 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
167
168 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
169
170 struct rbd_obj_request;
171 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
172
173 enum obj_request_type {
174 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
175 };
176
177 struct rbd_obj_request {
178 const char *object_name;
179 u64 offset; /* object start byte */
180 u64 length; /* bytes from offset */
181
182 struct rbd_img_request *img_request;
183 struct list_head links; /* img_request->obj_requests */
184 u32 which; /* posn image request list */
185
186 enum obj_request_type type;
187 union {
188 struct bio *bio_list;
189 struct {
190 struct page **pages;
191 u32 page_count;
192 };
193 };
194
195 struct ceph_osd_request *osd_req;
196
197 u64 xferred; /* bytes transferred */
198 u64 version;
199 s32 result;
200 atomic_t done;
201
202 rbd_obj_callback_t callback;
203 struct completion completion;
204
205 struct kref kref;
206 };
207
208 struct rbd_img_request {
209 struct request *rq;
210 struct rbd_device *rbd_dev;
211 u64 offset; /* starting image byte offset */
212 u64 length; /* byte count from offset */
213 bool write_request; /* false for read */
214 union {
215 struct ceph_snap_context *snapc; /* for writes */
216 u64 snap_id; /* for reads */
217 };
218 spinlock_t completion_lock;/* protects next_completion */
219 u32 next_completion;
220 rbd_img_callback_t callback;
221
222 u32 obj_request_count;
223 struct list_head obj_requests; /* rbd_obj_request structs */
224
225 struct kref kref;
226 };
227
228 #define for_each_obj_request(ireq, oreq) \
229 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
230 #define for_each_obj_request_from(ireq, oreq) \
231 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
232 #define for_each_obj_request_safe(ireq, oreq, n) \
233 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
234
235 struct rbd_snap {
236 struct device dev;
237 const char *name;
238 u64 size;
239 struct list_head node;
240 u64 id;
241 u64 features;
242 };
243
244 struct rbd_mapping {
245 u64 size;
246 u64 features;
247 bool read_only;
248 };
249
250 /*
251 * a single device
252 */
253 struct rbd_device {
254 int dev_id; /* blkdev unique id */
255
256 int major; /* blkdev assigned major */
257 struct gendisk *disk; /* blkdev's gendisk and rq */
258
259 u32 image_format; /* Either 1 or 2 */
260 struct rbd_client *rbd_client;
261
262 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
263
264 spinlock_t lock; /* queue, flags, open_count */
265
266 struct rbd_image_header header;
267 unsigned long flags; /* possibly lock protected */
268 struct rbd_spec *spec;
269
270 char *header_name;
271
272 struct ceph_file_layout layout;
273
274 struct ceph_osd_event *watch_event;
275 struct rbd_obj_request *watch_request;
276
277 struct rbd_spec *parent_spec;
278 u64 parent_overlap;
279
280 /* protects updating the header */
281 struct rw_semaphore header_rwsem;
282
283 struct rbd_mapping mapping;
284
285 struct list_head node;
286
287 /* list of snapshots */
288 struct list_head snaps;
289
290 /* sysfs related */
291 struct device dev;
292 unsigned long open_count; /* protected by lock */
293 };
294
295 /*
296 * Flag bits for rbd_dev->flags. If atomicity is required,
297 * rbd_dev->lock is used to protect access.
298 *
299 * Currently, only the "removing" flag (which is coupled with the
300 * "open_count" field) requires atomic access.
301 */
302 enum rbd_dev_flags {
303 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
304 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
305 };
306
307 static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
308
309 static LIST_HEAD(rbd_dev_list); /* devices */
310 static DEFINE_SPINLOCK(rbd_dev_list_lock);
311
312 static LIST_HEAD(rbd_client_list); /* clients */
313 static DEFINE_SPINLOCK(rbd_client_list_lock);
314
315 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
316 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);
317
318 static void rbd_dev_release(struct device *dev);
319 static void rbd_remove_snap_dev(struct rbd_snap *snap);
320
321 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
322 size_t count);
323 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
324 size_t count);
325
326 static struct bus_attribute rbd_bus_attrs[] = {
327 __ATTR(add, S_IWUSR, NULL, rbd_add),
328 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
329 __ATTR_NULL
330 };
331
332 static struct bus_type rbd_bus_type = {
333 .name = "rbd",
334 .bus_attrs = rbd_bus_attrs,
335 };
336
337 static void rbd_root_dev_release(struct device *dev)
338 {
339 }
340
341 static struct device rbd_root_dev = {
342 .init_name = "rbd",
343 .release = rbd_root_dev_release,
344 };
345
346 static __printf(2, 3)
347 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
348 {
349 struct va_format vaf;
350 va_list args;
351
352 va_start(args, fmt);
353 vaf.fmt = fmt;
354 vaf.va = &args;
355
356 if (!rbd_dev)
357 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
358 else if (rbd_dev->disk)
359 printk(KERN_WARNING "%s: %s: %pV\n",
360 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
361 else if (rbd_dev->spec && rbd_dev->spec->image_name)
362 printk(KERN_WARNING "%s: image %s: %pV\n",
363 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
364 else if (rbd_dev->spec && rbd_dev->spec->image_id)
365 printk(KERN_WARNING "%s: id %s: %pV\n",
366 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
367 else /* punt */
368 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
369 RBD_DRV_NAME, rbd_dev, &vaf);
370 va_end(args);
371 }
372
373 #ifdef RBD_DEBUG
374 #define rbd_assert(expr) \
375 if (unlikely(!(expr))) { \
376 printk(KERN_ERR "\nAssertion failure in %s() " \
377 "at line %d:\n\n" \
378 "\trbd_assert(%s);\n\n", \
379 __func__, __LINE__, #expr); \
380 BUG(); \
381 }
382 #else /* !RBD_DEBUG */
383 # define rbd_assert(expr) ((void) 0)
384 #endif /* !RBD_DEBUG */
385
386 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
387 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);
388
389 static int rbd_open(struct block_device *bdev, fmode_t mode)
390 {
391 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
392 bool removing = false;
393
394 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
395 return -EROFS;
396
397 spin_lock_irq(&rbd_dev->lock);
398 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
399 removing = true;
400 else
401 rbd_dev->open_count++;
402 spin_unlock_irq(&rbd_dev->lock);
403 if (removing)
404 return -ENOENT;
405
406 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
407 (void) get_device(&rbd_dev->dev);
408 set_device_ro(bdev, rbd_dev->mapping.read_only);
409 mutex_unlock(&ctl_mutex);
410
411 return 0;
412 }
413
414 static int rbd_release(struct gendisk *disk, fmode_t mode)
415 {
416 struct rbd_device *rbd_dev = disk->private_data;
417 unsigned long open_count_before;
418
419 spin_lock_irq(&rbd_dev->lock);
420 open_count_before = rbd_dev->open_count--;
421 spin_unlock_irq(&rbd_dev->lock);
422 rbd_assert(open_count_before > 0);
423
424 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
425 put_device(&rbd_dev->dev);
426 mutex_unlock(&ctl_mutex);
427
428 return 0;
429 }
430
431 static const struct block_device_operations rbd_bd_ops = {
432 .owner = THIS_MODULE,
433 .open = rbd_open,
434 .release = rbd_release,
435 };
436
437 /*
438 * Initialize an rbd client instance.
439 * We own *ceph_opts.
440 */
441 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
442 {
443 struct rbd_client *rbdc;
444 int ret = -ENOMEM;
445
446 dout("rbd_client_create\n");
447 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
448 if (!rbdc)
449 goto out_opt;
450
451 kref_init(&rbdc->kref);
452 INIT_LIST_HEAD(&rbdc->node);
453
454 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
455
456 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
457 if (IS_ERR(rbdc->client))
458 goto out_mutex;
459 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
460
461 ret = ceph_open_session(rbdc->client);
462 if (ret < 0)
463 goto out_err;
464
465 spin_lock(&rbd_client_list_lock);
466 list_add_tail(&rbdc->node, &rbd_client_list);
467 spin_unlock(&rbd_client_list_lock);
468
469 mutex_unlock(&ctl_mutex);
470
471 dout("rbd_client_create created %p\n", rbdc);
472 return rbdc;
473
474 out_err:
475 ceph_destroy_client(rbdc->client);
476 out_mutex:
477 mutex_unlock(&ctl_mutex);
478 kfree(rbdc);
479 out_opt:
480 if (ceph_opts)
481 ceph_destroy_options(ceph_opts);
482 return ERR_PTR(ret);
483 }
484
485 /*
486 * Find a ceph client with specific addr and configuration. If
487 * found, bump its reference count.
488 */
489 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
490 {
491 struct rbd_client *client_node;
492 bool found = false;
493
494 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
495 return NULL;
496
497 spin_lock(&rbd_client_list_lock);
498 list_for_each_entry(client_node, &rbd_client_list, node) {
499 if (!ceph_compare_options(ceph_opts, client_node->client)) {
500 kref_get(&client_node->kref);
501 found = true;
502 break;
503 }
504 }
505 spin_unlock(&rbd_client_list_lock);
506
507 return found ? client_node : NULL;
508 }
509
510 /*
511 * mount options
512 */
513 enum {
514 Opt_last_int,
515 /* int args above */
516 Opt_last_string,
517 /* string args above */
518 Opt_read_only,
519 Opt_read_write,
520 /* Boolean args above */
521 Opt_last_bool,
522 };
523
524 static match_table_t rbd_opts_tokens = {
525 /* int args above */
526 /* string args above */
527 {Opt_read_only, "read_only"},
528 {Opt_read_only, "ro"}, /* Alternate spelling */
529 {Opt_read_write, "read_write"},
530 {Opt_read_write, "rw"}, /* Alternate spelling */
531 /* Boolean args above */
532 {-1, NULL}
533 };
534
535 struct rbd_options {
536 bool read_only;
537 };
538
539 #define RBD_READ_ONLY_DEFAULT false
540
541 static int parse_rbd_opts_token(char *c, void *private)
542 {
543 struct rbd_options *rbd_opts = private;
544 substring_t argstr[MAX_OPT_ARGS];
545 int token, intval, ret;
546
547 token = match_token(c, rbd_opts_tokens, argstr);
548 if (token < 0)
549 return -EINVAL;
550
551 if (token < Opt_last_int) {
552 ret = match_int(&argstr[0], &intval);
553 if (ret < 0) {
554 pr_err("bad mount option arg (not int) "
555 "at '%s'\n", c);
556 return ret;
557 }
558 dout("got int token %d val %d\n", token, intval);
559 } else if (token > Opt_last_int && token < Opt_last_string) {
560 dout("got string token %d val %s\n", token,
561 argstr[0].from);
562 } else if (token > Opt_last_string && token < Opt_last_bool) {
563 dout("got Boolean token %d\n", token);
564 } else {
565 dout("got token %d\n", token);
566 }
567
568 switch (token) {
569 case Opt_read_only:
570 rbd_opts->read_only = true;
571 break;
572 case Opt_read_write:
573 rbd_opts->read_only = false;
574 break;
575 default:
576 rbd_assert(false);
577 break;
578 }
579 return 0;
580 }
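/*
 * A sketch of where these tokens come from (per
 * Documentation/ABI/testing/sysfs-bus-rbd; the exact string below is
 * hypothetical): the options field of the string written to
 * /sys/bus/rbd/add carries both libceph and rbd options, e.g.
 *
 *	echo "1.2.3.4:6789 name=admin,read_only rbd foo" \
 *		> /sys/bus/rbd/add
 *
 * Options libceph does not recognize are handed to
 * parse_rbd_opts_token() as the private option callback, which is how
 * "read_only" ends up being handled here.
 */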
581
582 /*
 * Get a ceph client with a specific addr and configuration; if one
 * does not exist, create it.
585 */
586 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
587 {
588 struct rbd_client *rbdc;
589
590 rbdc = rbd_client_find(ceph_opts);
591 if (rbdc) /* using an existing client */
592 ceph_destroy_options(ceph_opts);
593 else
594 rbdc = rbd_client_create(ceph_opts);
595
596 return rbdc;
597 }
598
599 /*
600 * Destroy ceph client
601 *
602 * Caller must hold rbd_client_list_lock.
603 */
604 static void rbd_client_release(struct kref *kref)
605 {
606 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
607
608 dout("rbd_release_client %p\n", rbdc);
609 spin_lock(&rbd_client_list_lock);
610 list_del(&rbdc->node);
611 spin_unlock(&rbd_client_list_lock);
612
613 ceph_destroy_client(rbdc->client);
614 kfree(rbdc);
615 }
616
617 /*
618 * Drop reference to ceph client node. If it's not referenced anymore, release
619 * it.
620 */
621 static void rbd_put_client(struct rbd_client *rbdc)
622 {
623 if (rbdc)
624 kref_put(&rbdc->kref, rbd_client_release);
625 }
626
627 static bool rbd_image_format_valid(u32 image_format)
628 {
629 return image_format == 1 || image_format == 2;
630 }
631
632 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
633 {
634 size_t size;
635 u32 snap_count;
636
637 /* The header has to start with the magic rbd header text */
638 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
639 return false;
640
641 /* The bio layer requires at least sector-sized I/O */
642
643 if (ondisk->options.order < SECTOR_SHIFT)
644 return false;
645
646 /* If we use u64 in a few spots we may be able to loosen this */
647
648 if (ondisk->options.order > 8 * sizeof (int) - 1)
649 return false;
650
651 /*
652 * The size of a snapshot header has to fit in a size_t, and
653 * that limits the number of snapshots.
654 */
655 snap_count = le32_to_cpu(ondisk->snap_count);
656 size = SIZE_MAX - sizeof (struct ceph_snap_context);
657 if (snap_count > size / sizeof (__le64))
658 return false;
659
660 /*
 * Not only that, but the size of the entire snapshot
662 * header must also be representable in a size_t.
663 */
664 size -= snap_count * sizeof (__le64);
665 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
666 return false;
667
668 return true;
669 }
670
671 /*
672 * Create a new header structure, translate header format from the on-disk
673 * header.
674 */
675 static int rbd_header_from_disk(struct rbd_image_header *header,
676 struct rbd_image_header_ondisk *ondisk)
677 {
678 u32 snap_count;
679 size_t len;
680 size_t size;
681 u32 i;
682
683 memset(header, 0, sizeof (*header));
684
685 snap_count = le32_to_cpu(ondisk->snap_count);
686
687 len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
688 header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
689 if (!header->object_prefix)
690 return -ENOMEM;
691 memcpy(header->object_prefix, ondisk->object_prefix, len);
692 header->object_prefix[len] = '\0';
693
694 if (snap_count) {
695 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
696
697 /* Save a copy of the snapshot names */
698
		if (snap_names_len > (u64) SIZE_MAX) {
			/* free object_prefix so this error path doesn't leak it */
			kfree(header->object_prefix);
			header->object_prefix = NULL;
			return -EIO;
		}
701 header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
702 if (!header->snap_names)
703 goto out_err;
704 /*
705 * Note that rbd_dev_v1_header_read() guarantees
706 * the ondisk buffer we're working with has
707 * snap_names_len bytes beyond the end of the
708 * snapshot id array, this memcpy() is safe.
709 */
710 memcpy(header->snap_names, &ondisk->snaps[snap_count],
711 snap_names_len);
712
713 /* Record each snapshot's size */
714
715 size = snap_count * sizeof (*header->snap_sizes);
716 header->snap_sizes = kmalloc(size, GFP_KERNEL);
717 if (!header->snap_sizes)
718 goto out_err;
719 for (i = 0; i < snap_count; i++)
720 header->snap_sizes[i] =
721 le64_to_cpu(ondisk->snaps[i].image_size);
722 } else {
723 WARN_ON(ondisk->snap_names_len);
724 header->snap_names = NULL;
725 header->snap_sizes = NULL;
726 }
727
728 header->features = 0; /* No features support in v1 images */
729 header->obj_order = ondisk->options.order;
730 header->crypt_type = ondisk->options.crypt_type;
731 header->comp_type = ondisk->options.comp_type;
732
733 /* Allocate and fill in the snapshot context */
734
735 header->image_size = le64_to_cpu(ondisk->image_size);
736 size = sizeof (struct ceph_snap_context);
737 size += snap_count * sizeof (header->snapc->snaps[0]);
738 header->snapc = kzalloc(size, GFP_KERNEL);
739 if (!header->snapc)
740 goto out_err;
741
742 atomic_set(&header->snapc->nref, 1);
743 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
744 header->snapc->num_snaps = snap_count;
745 for (i = 0; i < snap_count; i++)
746 header->snapc->snaps[i] =
747 le64_to_cpu(ondisk->snaps[i].id);
748
749 return 0;
750
751 out_err:
752 kfree(header->snap_sizes);
753 header->snap_sizes = NULL;
754 kfree(header->snap_names);
755 header->snap_names = NULL;
756 kfree(header->object_prefix);
757 header->object_prefix = NULL;
758
759 return -ENOMEM;
760 }
761
762 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
763 {
764 struct rbd_snap *snap;
765
766 if (snap_id == CEPH_NOSNAP)
767 return RBD_SNAP_HEAD_NAME;
768
769 list_for_each_entry(snap, &rbd_dev->snaps, node)
770 if (snap_id == snap->id)
771 return snap->name;
772
773 return NULL;
774 }
775
776 static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
777 {
778
779 struct rbd_snap *snap;
780
781 list_for_each_entry(snap, &rbd_dev->snaps, node) {
782 if (!strcmp(snap_name, snap->name)) {
783 rbd_dev->spec->snap_id = snap->id;
784 rbd_dev->mapping.size = snap->size;
785 rbd_dev->mapping.features = snap->features;
786
787 return 0;
788 }
789 }
790
791 return -ENOENT;
792 }
793
794 static int rbd_dev_set_mapping(struct rbd_device *rbd_dev)
795 {
796 int ret;
797
798 if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
799 sizeof (RBD_SNAP_HEAD_NAME))) {
800 rbd_dev->spec->snap_id = CEPH_NOSNAP;
801 rbd_dev->mapping.size = rbd_dev->header.image_size;
802 rbd_dev->mapping.features = rbd_dev->header.features;
803 ret = 0;
804 } else {
805 ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
806 if (ret < 0)
807 goto done;
808 rbd_dev->mapping.read_only = true;
809 }
810 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
811
812 done:
813 return ret;
814 }
815
816 static void rbd_header_free(struct rbd_image_header *header)
817 {
818 kfree(header->object_prefix);
819 header->object_prefix = NULL;
820 kfree(header->snap_sizes);
821 header->snap_sizes = NULL;
822 kfree(header->snap_names);
823 header->snap_names = NULL;
824 ceph_put_snap_context(header->snapc);
825 header->snapc = NULL;
826 }
827
828 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
829 {
830 char *name;
831 u64 segment;
832 int ret;
833
834 name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
835 if (!name)
836 return NULL;
837 segment = offset >> rbd_dev->header.obj_order;
838 ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
839 rbd_dev->header.object_prefix, segment);
840 if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
841 pr_err("error formatting segment name for #%llu (%d)\n",
842 segment, ret);
843 kfree(name);
844 name = NULL;
845 }
846
847 return name;
848 }
849
850 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
851 {
852 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
853
854 return offset & (segment_size - 1);
855 }
856
857 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
858 u64 offset, u64 length)
859 {
860 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
861
862 offset &= segment_size - 1;
863
864 rbd_assert(length <= U64_MAX - offset);
865 if (offset + length > segment_size)
866 length = segment_size - offset;
867
868 return length;
869 }
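/*
 * Worked example of the three segment helpers above, assuming the rbd
 * default obj_order of 22 (4 MiB objects) and a hypothetical
 * object_prefix of "rb.0.1202.6b8b4567":
 *
 *	image offset 0x500000, length 0x400000
 *	rbd_segment_name()   -> "rb.0.1202.6b8b4567.000000000001"
 *	rbd_segment_offset() -> 0x500000 & 0x3fffff = 0x100000
 *	rbd_segment_length() -> 0x400000 - 0x100000 = 0x300000
 *
 * The remaining 0x100000 bytes land in the next object and become a
 * separate object request (see rbd_img_request_fill_bio()).
 */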
870
871 /*
872 * returns the size of an object in the image
873 */
874 static u64 rbd_obj_bytes(struct rbd_image_header *header)
875 {
876 return 1 << header->obj_order;
877 }
878
879 /*
880 * bio helpers
881 */
882
883 static void bio_chain_put(struct bio *chain)
884 {
885 struct bio *tmp;
886
887 while (chain) {
888 tmp = chain;
889 chain = chain->bi_next;
890 bio_put(tmp);
891 }
892 }
893
894 /*
 * zeros a bio chain, starting at a specific offset
896 */
897 static void zero_bio_chain(struct bio *chain, int start_ofs)
898 {
899 struct bio_vec *bv;
900 unsigned long flags;
901 void *buf;
902 int i;
903 int pos = 0;
904
905 while (chain) {
906 bio_for_each_segment(bv, chain, i) {
907 if (pos + bv->bv_len > start_ofs) {
908 int remainder = max(start_ofs - pos, 0);
909 buf = bvec_kmap_irq(bv, &flags);
910 memset(buf + remainder, 0,
911 bv->bv_len - remainder);
912 bvec_kunmap_irq(buf, &flags);
913 }
914 pos += bv->bv_len;
915 }
916
917 chain = chain->bi_next;
918 }
919 }
920
921 /*
922 * Clone a portion of a bio, starting at the given byte offset
923 * and continuing for the number of bytes indicated.
924 */
925 static struct bio *bio_clone_range(struct bio *bio_src,
926 unsigned int offset,
927 unsigned int len,
928 gfp_t gfpmask)
929 {
930 struct bio_vec *bv;
931 unsigned int resid;
932 unsigned short idx;
933 unsigned int voff;
934 unsigned short end_idx;
935 unsigned short vcnt;
936 struct bio *bio;
937
938 /* Handle the easy case for the caller */
939
940 if (!offset && len == bio_src->bi_size)
941 return bio_clone(bio_src, gfpmask);
942
943 if (WARN_ON_ONCE(!len))
944 return NULL;
945 if (WARN_ON_ONCE(len > bio_src->bi_size))
946 return NULL;
947 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
948 return NULL;
949
950 /* Find first affected segment... */
951
952 resid = offset;
953 __bio_for_each_segment(bv, bio_src, idx, 0) {
954 if (resid < bv->bv_len)
955 break;
956 resid -= bv->bv_len;
957 }
958 voff = resid;
959
960 /* ...and the last affected segment */
961
962 resid += len;
963 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
964 if (resid <= bv->bv_len)
965 break;
966 resid -= bv->bv_len;
967 }
968 vcnt = end_idx - idx + 1;
969
970 /* Build the clone */
971
972 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
973 if (!bio)
974 return NULL; /* ENOMEM */
975
976 bio->bi_bdev = bio_src->bi_bdev;
977 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
978 bio->bi_rw = bio_src->bi_rw;
979 bio->bi_flags |= 1 << BIO_CLONED;
980
981 /*
982 * Copy over our part of the bio_vec, then update the first
983 * and last (or only) entries.
984 */
985 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
986 vcnt * sizeof (struct bio_vec));
987 bio->bi_io_vec[0].bv_offset += voff;
988 if (vcnt > 1) {
989 bio->bi_io_vec[0].bv_len -= voff;
990 bio->bi_io_vec[vcnt - 1].bv_len = resid;
991 } else {
992 bio->bi_io_vec[0].bv_len = len;
993 }
994
995 bio->bi_vcnt = vcnt;
996 bio->bi_size = len;
997 bio->bi_idx = 0;
998
999 return bio;
1000 }
1001
1002 /*
1003 * Clone a portion of a bio chain, starting at the given byte offset
1004 * into the first bio in the source chain and continuing for the
1005 * number of bytes indicated. The result is another bio chain of
1006 * exactly the given length, or a null pointer on error.
1007 *
1008 * The bio_src and offset parameters are both in-out. On entry they
1009 * refer to the first source bio and the offset into that bio where
1010 * the start of data to be cloned is located.
1011 *
1012 * On return, bio_src is updated to refer to the bio in the source
1013 * chain that contains first un-cloned byte, and *offset will
1014 * contain the offset of that byte within that bio.
1015 */
1016 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1017 unsigned int *offset,
1018 unsigned int len,
1019 gfp_t gfpmask)
1020 {
1021 struct bio *bi = *bio_src;
1022 unsigned int off = *offset;
1023 struct bio *chain = NULL;
1024 struct bio **end;
1025
1026 /* Build up a chain of clone bios up to the limit */
1027
1028 if (!bi || off >= bi->bi_size || !len)
1029 return NULL; /* Nothing to clone */
1030
1031 end = &chain;
1032 while (len) {
1033 unsigned int bi_size;
1034 struct bio *bio;
1035
1036 if (!bi) {
1037 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1038 goto out_err; /* EINVAL; ran out of bio's */
1039 }
1040 bi_size = min_t(unsigned int, bi->bi_size - off, len);
1041 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1042 if (!bio)
1043 goto out_err; /* ENOMEM */
1044
1045 *end = bio;
1046 end = &bio->bi_next;
1047
1048 off += bi_size;
1049 if (off == bi->bi_size) {
1050 bi = bi->bi_next;
1051 off = 0;
1052 }
1053 len -= bi_size;
1054 }
1055 *bio_src = bi;
1056 *offset = off;
1057
1058 return chain;
1059 out_err:
1060 bio_chain_put(chain);
1061
1062 return NULL;
1063 }
1064
1065 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1066 {
1067 kref_get(&obj_request->kref);
1068 }
1069
1070 static void rbd_obj_request_destroy(struct kref *kref);
1071 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1072 {
1073 rbd_assert(obj_request != NULL);
1074 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1075 }
1076
1077 static void rbd_img_request_get(struct rbd_img_request *img_request)
1078 {
1079 kref_get(&img_request->kref);
1080 }
1081
1082 static void rbd_img_request_destroy(struct kref *kref);
1083 static void rbd_img_request_put(struct rbd_img_request *img_request)
1084 {
1085 rbd_assert(img_request != NULL);
1086 kref_put(&img_request->kref, rbd_img_request_destroy);
1087 }
1088
1089 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1090 struct rbd_obj_request *obj_request)
1091 {
1092 rbd_assert(obj_request->img_request == NULL);
1093
1094 rbd_obj_request_get(obj_request);
1095 obj_request->img_request = img_request;
1096 obj_request->which = img_request->obj_request_count;
1097 rbd_assert(obj_request->which != BAD_WHICH);
1098 img_request->obj_request_count++;
1099 list_add_tail(&obj_request->links, &img_request->obj_requests);
1100 }
1101
1102 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1103 struct rbd_obj_request *obj_request)
1104 {
1105 rbd_assert(obj_request->which != BAD_WHICH);
1106
1107 list_del(&obj_request->links);
1108 rbd_assert(img_request->obj_request_count > 0);
1109 img_request->obj_request_count--;
1110 rbd_assert(obj_request->which == img_request->obj_request_count);
1111 obj_request->which = BAD_WHICH;
1112 rbd_assert(obj_request->img_request == img_request);
1113 obj_request->img_request = NULL;
1114 obj_request->callback = NULL;
1115 rbd_obj_request_put(obj_request);
1116 }
1117
1118 static bool obj_request_type_valid(enum obj_request_type type)
1119 {
1120 switch (type) {
1121 case OBJ_REQUEST_NODATA:
1122 case OBJ_REQUEST_BIO:
1123 case OBJ_REQUEST_PAGES:
1124 return true;
1125 default:
1126 return false;
1127 }
1128 }
1129
1130 struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
1131 {
1132 struct ceph_osd_req_op *op;
1133 va_list args;
1134 size_t size;
1135
1136 op = kzalloc(sizeof (*op), GFP_NOIO);
1137 if (!op)
1138 return NULL;
1139 op->op = opcode;
1140 va_start(args, opcode);
1141 switch (opcode) {
1142 case CEPH_OSD_OP_READ:
1143 case CEPH_OSD_OP_WRITE:
1144 /* rbd_osd_req_op_create(READ, offset, length) */
1145 /* rbd_osd_req_op_create(WRITE, offset, length) */
1146 op->extent.offset = va_arg(args, u64);
1147 op->extent.length = va_arg(args, u64);
1148 if (opcode == CEPH_OSD_OP_WRITE)
1149 op->payload_len = op->extent.length;
1150 break;
1151 case CEPH_OSD_OP_STAT:
1152 break;
1153 case CEPH_OSD_OP_CALL:
1154 /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */
1155 op->cls.class_name = va_arg(args, char *);
1156 size = strlen(op->cls.class_name);
1157 rbd_assert(size <= (size_t) U8_MAX);
1158 op->cls.class_len = size;
1159 op->payload_len = size;
1160
1161 op->cls.method_name = va_arg(args, char *);
1162 size = strlen(op->cls.method_name);
1163 rbd_assert(size <= (size_t) U8_MAX);
1164 op->cls.method_len = size;
1165 op->payload_len += size;
1166
1167 op->cls.argc = 0;
1168 op->cls.indata = va_arg(args, void *);
1169 size = va_arg(args, size_t);
1170 rbd_assert(size <= (size_t) U32_MAX);
1171 op->cls.indata_len = (u32) size;
1172 op->payload_len += size;
1173 break;
1174 case CEPH_OSD_OP_NOTIFY_ACK:
1175 case CEPH_OSD_OP_WATCH:
1176 /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */
1177 /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */
1178 op->watch.cookie = va_arg(args, u64);
1179 op->watch.ver = va_arg(args, u64);
1180 op->watch.ver = cpu_to_le64(op->watch.ver);
1181 if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int))
1182 op->watch.flag = (u8) 1;
1183 break;
1184 default:
1185 rbd_warn(NULL, "unsupported opcode %hu\n", opcode);
1186 kfree(op);
1187 op = NULL;
1188 break;
1189 }
1190 va_end(args);
1191
1192 return op;
1193 }
1194
1195 static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op)
1196 {
1197 kfree(op);
1198 }
1199
1200 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1201 struct rbd_obj_request *obj_request)
1202 {
1203 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1204 }
1205
1206 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1207 {
1208 if (img_request->callback)
1209 img_request->callback(img_request);
1210 else
1211 rbd_img_request_put(img_request);
1212 }
1213
1214 /* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1215
1216 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1217 {
1218 return wait_for_completion_interruptible(&obj_request->completion);
1219 }
1220
1221 static void obj_request_done_init(struct rbd_obj_request *obj_request)
1222 {
1223 atomic_set(&obj_request->done, 0);
1224 smp_wmb();
1225 }
1226
1227 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1228 {
1229 atomic_set(&obj_request->done, 1);
1230 smp_wmb();
1231 }
1232
1233 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1234 {
1235 smp_rmb();
1236 return atomic_read(&obj_request->done) != 0;
1237 }
1238
1239 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request,
1240 struct ceph_osd_op *op)
1241 {
1242 obj_request_done_set(obj_request);
1243 }
1244
1245 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1246 {
1247 if (obj_request->callback)
1248 obj_request->callback(obj_request);
1249 else
1250 complete_all(&obj_request->completion);
1251 }
1252
1253 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request,
1254 struct ceph_osd_op *op)
1255 {
1256 u64 xferred;
1257
1258 /*
1259 * We support a 64-bit length, but ultimately it has to be
1260 * passed to blk_end_request(), which takes an unsigned int.
1261 */
1262 xferred = le64_to_cpu(op->extent.length);
1263 rbd_assert(xferred < (u64) UINT_MAX);
1264 if (obj_request->result == (s32) -ENOENT) {
1265 zero_bio_chain(obj_request->bio_list, 0);
1266 obj_request->result = 0;
1267 } else if (xferred < obj_request->length && !obj_request->result) {
1268 zero_bio_chain(obj_request->bio_list, xferred);
1269 xferred = obj_request->length;
1270 }
1271 obj_request->xferred = xferred;
1272 obj_request_done_set(obj_request);
1273 }
1274
1275 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request,
1276 struct ceph_osd_op *op)
1277 {
1278 obj_request->xferred = le64_to_cpu(op->extent.length);
1279 obj_request_done_set(obj_request);
1280 }
1281
1282 /*
1283 * For a simple stat call there's nothing to do. We'll do more if
1284 * this is part of a write sequence for a layered image.
1285 */
1286 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request,
1287 struct ceph_osd_op *op)
1288 {
1289 obj_request_done_set(obj_request);
1290 }
1291
1292 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1293 struct ceph_msg *msg)
1294 {
1295 struct rbd_obj_request *obj_request = osd_req->r_priv;
1296 struct ceph_osd_reply_head *reply_head;
1297 struct ceph_osd_op *op;
1298 u32 num_ops;
1299 u16 opcode;
1300
1301 rbd_assert(osd_req == obj_request->osd_req);
1302 rbd_assert(!!obj_request->img_request ^
1303 (obj_request->which == BAD_WHICH));
1304
1305 obj_request->xferred = le32_to_cpu(msg->hdr.data_len);
1306 reply_head = msg->front.iov_base;
1307 obj_request->result = (s32) le32_to_cpu(reply_head->result);
1308 obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version);
1309
1310 num_ops = le32_to_cpu(reply_head->num_ops);
1311 WARN_ON(num_ops != 1); /* For now */
1312
1313 op = &reply_head->ops[0];
1314 opcode = le16_to_cpu(op->op);
1315 switch (opcode) {
1316 case CEPH_OSD_OP_READ:
1317 rbd_osd_read_callback(obj_request, op);
1318 break;
1319 case CEPH_OSD_OP_WRITE:
1320 rbd_osd_write_callback(obj_request, op);
1321 break;
1322 case CEPH_OSD_OP_STAT:
1323 rbd_osd_stat_callback(obj_request, op);
1324 break;
1325 case CEPH_OSD_OP_CALL:
1326 case CEPH_OSD_OP_NOTIFY_ACK:
1327 case CEPH_OSD_OP_WATCH:
1328 rbd_osd_trivial_callback(obj_request, op);
1329 break;
1330 default:
1331 rbd_warn(NULL, "%s: unsupported op %hu\n",
1332 obj_request->object_name, (unsigned short) opcode);
1333 break;
1334 }
1335
1336 if (obj_request_done_test(obj_request))
1337 rbd_obj_request_complete(obj_request);
1338 }
1339
1340 static struct ceph_osd_request *rbd_osd_req_create(
1341 struct rbd_device *rbd_dev,
1342 bool write_request,
1343 struct rbd_obj_request *obj_request,
1344 struct ceph_osd_req_op *op)
1345 {
1346 struct rbd_img_request *img_request = obj_request->img_request;
1347 struct ceph_snap_context *snapc = NULL;
1348 struct ceph_osd_client *osdc;
1349 struct ceph_osd_request *osd_req;
1350 struct timespec now;
1351 struct timespec *mtime;
1352 u64 snap_id = CEPH_NOSNAP;
1353 u64 offset = obj_request->offset;
1354 u64 length = obj_request->length;
1355
1356 if (img_request) {
1357 rbd_assert(img_request->write_request == write_request);
1358 if (img_request->write_request)
1359 snapc = img_request->snapc;
1360 else
1361 snap_id = img_request->snap_id;
1362 }
1363
1364 /* Allocate and initialize the request, for the single op */
1365
1366 osdc = &rbd_dev->rbd_client->client->osdc;
1367 osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1368 if (!osd_req)
1369 return NULL; /* ENOMEM */
1370
1371 rbd_assert(obj_request_type_valid(obj_request->type));
1372 switch (obj_request->type) {
1373 case OBJ_REQUEST_NODATA:
1374 break; /* Nothing to do */
1375 case OBJ_REQUEST_BIO:
1376 rbd_assert(obj_request->bio_list != NULL);
1377 osd_req->r_bio = obj_request->bio_list;
1378 break;
1379 case OBJ_REQUEST_PAGES:
1380 osd_req->r_pages = obj_request->pages;
1381 osd_req->r_num_pages = obj_request->page_count;
1382 osd_req->r_page_alignment = offset & ~PAGE_MASK;
1383 break;
1384 }
1385
1386 if (write_request) {
1387 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1388 now = CURRENT_TIME;
1389 mtime = &now;
1390 } else {
1391 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1392 mtime = NULL; /* not needed for reads */
1393 offset = 0; /* These are not used... */
1394 length = 0; /* ...for osd read requests */
1395 }
1396
1397 osd_req->r_callback = rbd_osd_req_callback;
1398 osd_req->r_priv = obj_request;
1399
1400 osd_req->r_oid_len = strlen(obj_request->object_name);
1401 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1402 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1403
1404 osd_req->r_file_layout = rbd_dev->layout; /* struct */
1405
1406 /* osd_req will get its own reference to snapc (if non-null) */
1407
1408 ceph_osdc_build_request(osd_req, offset, length, 1, op,
1409 snapc, snap_id, mtime);
1410
1411 return osd_req;
1412 }
1413
1414 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1415 {
1416 ceph_osdc_put_request(osd_req);
1417 }
1418
1419 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1420
1421 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1422 u64 offset, u64 length,
1423 enum obj_request_type type)
1424 {
1425 struct rbd_obj_request *obj_request;
1426 size_t size;
1427 char *name;
1428
1429 rbd_assert(obj_request_type_valid(type));
1430
1431 size = strlen(object_name) + 1;
1432 obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
1433 if (!obj_request)
1434 return NULL;
1435
1436 name = (char *)(obj_request + 1);
1437 obj_request->object_name = memcpy(name, object_name, size);
1438 obj_request->offset = offset;
1439 obj_request->length = length;
1440 obj_request->which = BAD_WHICH;
1441 obj_request->type = type;
1442 INIT_LIST_HEAD(&obj_request->links);
1443 obj_request_done_init(obj_request);
1444 init_completion(&obj_request->completion);
1445 kref_init(&obj_request->kref);
1446
1447 return obj_request;
1448 }
1449
1450 static void rbd_obj_request_destroy(struct kref *kref)
1451 {
1452 struct rbd_obj_request *obj_request;
1453
1454 obj_request = container_of(kref, struct rbd_obj_request, kref);
1455
1456 rbd_assert(obj_request->img_request == NULL);
1457 rbd_assert(obj_request->which == BAD_WHICH);
1458
1459 if (obj_request->osd_req)
1460 rbd_osd_req_destroy(obj_request->osd_req);
1461
1462 rbd_assert(obj_request_type_valid(obj_request->type));
1463 switch (obj_request->type) {
1464 case OBJ_REQUEST_NODATA:
1465 break; /* Nothing to do */
1466 case OBJ_REQUEST_BIO:
1467 if (obj_request->bio_list)
1468 bio_chain_put(obj_request->bio_list);
1469 break;
1470 case OBJ_REQUEST_PAGES:
1471 if (obj_request->pages)
1472 ceph_release_page_vector(obj_request->pages,
1473 obj_request->page_count);
1474 break;
1475 }
1476
1477 kfree(obj_request);
1478 }
1479
1480 /*
1481 * Caller is responsible for filling in the list of object requests
1482 * that comprises the image request, and the Linux request pointer
1483 * (if there is one).
1484 */
1485 struct rbd_img_request *rbd_img_request_create(struct rbd_device *rbd_dev,
1486 u64 offset, u64 length,
1487 bool write_request)
1488 {
1489 struct rbd_img_request *img_request;
1490 struct ceph_snap_context *snapc = NULL;
1491
1492 img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1493 if (!img_request)
1494 return NULL;
1495
1496 if (write_request) {
1497 down_read(&rbd_dev->header_rwsem);
1498 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1499 up_read(&rbd_dev->header_rwsem);
1500 if (WARN_ON(!snapc)) {
1501 kfree(img_request);
1502 return NULL; /* Shouldn't happen */
1503 }
1504 }
1505
1506 img_request->rq = NULL;
1507 img_request->rbd_dev = rbd_dev;
1508 img_request->offset = offset;
1509 img_request->length = length;
1510 img_request->write_request = write_request;
1511 if (write_request)
1512 img_request->snapc = snapc;
1513 else
1514 img_request->snap_id = rbd_dev->spec->snap_id;
1515 spin_lock_init(&img_request->completion_lock);
1516 img_request->next_completion = 0;
1517 img_request->callback = NULL;
1518 img_request->obj_request_count = 0;
1519 INIT_LIST_HEAD(&img_request->obj_requests);
1520 kref_init(&img_request->kref);
1521
1522 rbd_img_request_get(img_request); /* Avoid a warning */
1523 rbd_img_request_put(img_request); /* TEMPORARY */
1524
1525 return img_request;
1526 }
1527
1528 static void rbd_img_request_destroy(struct kref *kref)
1529 {
1530 struct rbd_img_request *img_request;
1531 struct rbd_obj_request *obj_request;
1532 struct rbd_obj_request *next_obj_request;
1533
1534 img_request = container_of(kref, struct rbd_img_request, kref);
1535
1536 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1537 rbd_img_obj_request_del(img_request, obj_request);
1538 rbd_assert(img_request->obj_request_count == 0);
1539
1540 if (img_request->write_request)
1541 ceph_put_snap_context(img_request->snapc);
1542
1543 kfree(img_request);
1544 }
1545
1546 static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
1547 struct bio *bio_list)
1548 {
1549 struct rbd_device *rbd_dev = img_request->rbd_dev;
1550 struct rbd_obj_request *obj_request = NULL;
1551 struct rbd_obj_request *next_obj_request;
1552 unsigned int bio_offset;
1553 u64 image_offset;
1554 u64 resid;
1555 u16 opcode;
1556
1557 opcode = img_request->write_request ? CEPH_OSD_OP_WRITE
1558 : CEPH_OSD_OP_READ;
1559 bio_offset = 0;
1560 image_offset = img_request->offset;
1561 rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT);
1562 resid = img_request->length;
1563 rbd_assert(resid > 0);
1564 while (resid) {
1565 const char *object_name;
1566 unsigned int clone_size;
1567 struct ceph_osd_req_op *op;
1568 u64 offset;
1569 u64 length;
1570
1571 object_name = rbd_segment_name(rbd_dev, image_offset);
1572 if (!object_name)
1573 goto out_unwind;
1574 offset = rbd_segment_offset(rbd_dev, image_offset);
1575 length = rbd_segment_length(rbd_dev, image_offset, resid);
1576 obj_request = rbd_obj_request_create(object_name,
1577 offset, length,
1578 OBJ_REQUEST_BIO);
1579 kfree(object_name); /* object request has its own copy */
1580 if (!obj_request)
1581 goto out_unwind;
1582
1583 rbd_assert(length <= (u64) UINT_MAX);
1584 clone_size = (unsigned int) length;
1585 obj_request->bio_list = bio_chain_clone_range(&bio_list,
1586 &bio_offset, clone_size,
1587 GFP_ATOMIC);
1588 if (!obj_request->bio_list)
1589 goto out_partial;
1590
1591 /*
1592 * Build up the op to use in building the osd
1593 * request. Note that the contents of the op are
1594 * copied by rbd_osd_req_create().
1595 */
1596 op = rbd_osd_req_op_create(opcode, offset, length);
1597 if (!op)
1598 goto out_partial;
1599 obj_request->osd_req = rbd_osd_req_create(rbd_dev,
1600 img_request->write_request,
1601 obj_request, op);
1602 rbd_osd_req_op_destroy(op);
1603 if (!obj_request->osd_req)
1604 goto out_partial;
1605 /* status and version are initially zero-filled */
1606
1607 rbd_img_obj_request_add(img_request, obj_request);
1608
1609 image_offset += length;
1610 resid -= length;
1611 }
1612
1613 return 0;
1614
1615 out_partial:
1616 rbd_obj_request_put(obj_request);
1617 out_unwind:
1618 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1619 rbd_obj_request_put(obj_request);
1620
1621 return -ENOMEM;
1622 }
1623
1624 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1625 {
1626 struct rbd_img_request *img_request;
1627 u32 which = obj_request->which;
1628 bool more = true;
1629
1630 img_request = obj_request->img_request;
1631
1632 rbd_assert(img_request != NULL);
1633 rbd_assert(img_request->rq != NULL);
1634 rbd_assert(img_request->obj_request_count > 0);
1635 rbd_assert(which != BAD_WHICH);
1636 rbd_assert(which < img_request->obj_request_count);
1637 rbd_assert(which >= img_request->next_completion);
1638
1639 spin_lock_irq(&img_request->completion_lock);
1640 if (which != img_request->next_completion)
1641 goto out;
1642
1643 for_each_obj_request_from(img_request, obj_request) {
1644 unsigned int xferred;
1645 int result;
1646
1647 rbd_assert(more);
1648 rbd_assert(which < img_request->obj_request_count);
1649
1650 if (!obj_request_done_test(obj_request))
1651 break;
1652
1653 rbd_assert(obj_request->xferred <= (u64) UINT_MAX);
1654 xferred = (unsigned int) obj_request->xferred;
1655 result = (int) obj_request->result;
1656 if (result)
1657 rbd_warn(NULL, "obj_request %s result %d xferred %u\n",
1658 img_request->write_request ? "write" : "read",
1659 result, xferred);
1660
1661 more = blk_end_request(img_request->rq, result, xferred);
1662 which++;
1663 }
1664 rbd_assert(more ^ (which == img_request->obj_request_count));
1665 img_request->next_completion = which;
1666 out:
1667 spin_unlock_irq(&img_request->completion_lock);
1668
1669 if (!more)
1670 rbd_img_request_complete(img_request);
1671 }
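/*
 * Completion-ordering example for the callback above: if three object
 * requests finish in the order 2, 0, 1, the callback for 2 returns
 * immediately (2 != next_completion), the callback for 0 feeds 0's
 * bytes to blk_end_request() and stops at the not-yet-done request 1,
 * and the callback for 1 then completes both 1 and 2.  The block
 * layer therefore always sees bytes reported in image order.
 */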
1672
1673 static int rbd_img_request_submit(struct rbd_img_request *img_request)
1674 {
1675 struct rbd_device *rbd_dev = img_request->rbd_dev;
1676 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1677 struct rbd_obj_request *obj_request;
1678
1679 for_each_obj_request(img_request, obj_request) {
1680 int ret;
1681
1682 obj_request->callback = rbd_img_obj_callback;
1683 ret = rbd_obj_request_submit(osdc, obj_request);
1684 if (ret)
1685 return ret;
1686 /*
1687 * The image request has its own reference to each
1688 * of its object requests, so we can safely drop the
1689 * initial one here.
1690 */
1691 rbd_obj_request_put(obj_request);
1692 }
1693
1694 return 0;
1695 }
1696
1697 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev,
1698 u64 ver, u64 notify_id)
1699 {
1700 struct rbd_obj_request *obj_request;
1701 struct ceph_osd_req_op *op;
1702 struct ceph_osd_client *osdc;
1703 int ret;
1704
1705 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
1706 OBJ_REQUEST_NODATA);
1707 if (!obj_request)
1708 return -ENOMEM;
1709
1710 ret = -ENOMEM;
1711 op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver);
1712 if (!op)
1713 goto out;
1714 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
1715 obj_request, op);
1716 rbd_osd_req_op_destroy(op);
1717 if (!obj_request->osd_req)
1718 goto out;
1719
1720 osdc = &rbd_dev->rbd_client->client->osdc;
1721 obj_request->callback = rbd_obj_request_put;
1722 ret = rbd_obj_request_submit(osdc, obj_request);
1723 out:
1724 if (ret)
1725 rbd_obj_request_put(obj_request);
1726
1727 return ret;
1728 }
1729
1730 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1731 {
1732 struct rbd_device *rbd_dev = (struct rbd_device *)data;
1733 u64 hver;
1734 int rc;
1735
1736 if (!rbd_dev)
1737 return;
1738
1739 dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
1740 rbd_dev->header_name, (unsigned long long) notify_id,
1741 (unsigned int) opcode);
1742 rc = rbd_dev_refresh(rbd_dev, &hver);
1743 if (rc)
1744 rbd_warn(rbd_dev, "got notification but failed to "
1745 " update snaps: %d\n", rc);
1746
1747 rbd_obj_notify_ack(rbd_dev, hver, notify_id);
1748 }
1749
1750 /*
1751 * Request sync osd watch/unwatch. The value of "start" determines
1752 * whether a watch request is being initiated or torn down.
1753 */
1754 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
1755 {
1756 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1757 struct rbd_obj_request *obj_request;
1758 struct ceph_osd_req_op *op;
1759 int ret;
1760
1761 rbd_assert(start ^ !!rbd_dev->watch_event);
1762 rbd_assert(start ^ !!rbd_dev->watch_request);
1763
1764 if (start) {
1765 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
1766 &rbd_dev->watch_event);
1767 if (ret < 0)
1768 return ret;
1769 rbd_assert(rbd_dev->watch_event != NULL);
1770 }
1771
1772 ret = -ENOMEM;
1773 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
1774 OBJ_REQUEST_NODATA);
1775 if (!obj_request)
1776 goto out_cancel;
1777
1778 op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH,
1779 rbd_dev->watch_event->cookie,
1780 rbd_dev->header.obj_version, start);
1781 if (!op)
1782 goto out_cancel;
1783 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true,
1784 obj_request, op);
1785 rbd_osd_req_op_destroy(op);
1786 if (!obj_request->osd_req)
1787 goto out_cancel;
1788
1789 if (start)
1790 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
1791 else
1792 ceph_osdc_unregister_linger_request(osdc,
1793 rbd_dev->watch_request->osd_req);
1794 ret = rbd_obj_request_submit(osdc, obj_request);
1795 if (ret)
1796 goto out_cancel;
1797 ret = rbd_obj_request_wait(obj_request);
1798 if (ret)
1799 goto out_cancel;
1800 ret = obj_request->result;
1801 if (ret)
1802 goto out_cancel;
1803
1804 /*
1805 * A watch request is set to linger, so the underlying osd
1806 * request won't go away until we unregister it. We retain
1807 * a pointer to the object request during that time (in
1808 * rbd_dev->watch_request), so we'll keep a reference to
1809 * it. We'll drop that reference (below) after we've
1810 * unregistered it.
1811 */
1812 if (start) {
1813 rbd_dev->watch_request = obj_request;
1814
1815 return 0;
1816 }
1817
1818 /* We have successfully torn down the watch request */
1819
1820 rbd_obj_request_put(rbd_dev->watch_request);
1821 rbd_dev->watch_request = NULL;
1822 out_cancel:
1823 /* Cancel the event if we're tearing down, or on error */
1824 ceph_osdc_cancel_event(rbd_dev->watch_event);
1825 rbd_dev->watch_event = NULL;
1826 if (obj_request)
1827 rbd_obj_request_put(obj_request);
1828
1829 return ret;
1830 }
1831
1832 /*
1833 * Synchronous osd object method call
1834 */
1835 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
1836 const char *object_name,
1837 const char *class_name,
1838 const char *method_name,
1839 const char *outbound,
1840 size_t outbound_size,
1841 char *inbound,
1842 size_t inbound_size,
1843 u64 *version)
1844 {
1845 struct rbd_obj_request *obj_request;
1846 struct ceph_osd_client *osdc;
1847 struct ceph_osd_req_op *op;
1848 struct page **pages;
1849 u32 page_count;
1850 int ret;
1851
1852 /*
1853 * Method calls are ultimately read operations but they
1854 * don't involve object data (so no offset or length).
 * The result should be placed into the inbound buffer
1856 * provided. They also supply outbound data--parameters for
1857 * the object method. Currently if this is present it will
1858 * be a snapshot id.
1859 */
1860 page_count = (u32) calc_pages_for(0, inbound_size);
1861 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
1862 if (IS_ERR(pages))
1863 return PTR_ERR(pages);
1864
1865 ret = -ENOMEM;
1866 obj_request = rbd_obj_request_create(object_name, 0, 0,
1867 OBJ_REQUEST_PAGES);
1868 if (!obj_request)
1869 goto out;
1870
1871 obj_request->pages = pages;
1872 obj_request->page_count = page_count;
1873
1874 op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name,
1875 method_name, outbound, outbound_size);
1876 if (!op)
1877 goto out;
1878 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
1879 obj_request, op);
1880 rbd_osd_req_op_destroy(op);
1881 if (!obj_request->osd_req)
1882 goto out;
1883
1884 osdc = &rbd_dev->rbd_client->client->osdc;
1885 ret = rbd_obj_request_submit(osdc, obj_request);
1886 if (ret)
1887 goto out;
1888 ret = rbd_obj_request_wait(obj_request);
1889 if (ret)
1890 goto out;
1891
1892 ret = obj_request->result;
1893 if (ret < 0)
1894 goto out;
1895 ret = 0;
1896 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
1897 if (version)
1898 *version = obj_request->version;
1899 out:
1900 if (obj_request)
1901 rbd_obj_request_put(obj_request);
1902 else
1903 ceph_release_page_vector(pages, page_count);
1904
1905 return ret;
1906 }
1907
1908 static void rbd_request_fn(struct request_queue *q)
1909 {
1910 struct rbd_device *rbd_dev = q->queuedata;
1911 bool read_only = rbd_dev->mapping.read_only;
1912 struct request *rq;
1913 int result;
1914
1915 while ((rq = blk_fetch_request(q))) {
1916 bool write_request = rq_data_dir(rq) == WRITE;
1917 struct rbd_img_request *img_request;
1918 u64 offset;
1919 u64 length;
1920
1921 /* Ignore any non-FS requests that filter through. */
1922
1923 if (rq->cmd_type != REQ_TYPE_FS) {
1924 dout("%s: non-fs request type %d\n", __func__,
1925 (int) rq->cmd_type);
1926 __blk_end_request_all(rq, 0);
1927 continue;
1928 }
1929
1930 /* Ignore/skip any zero-length requests */
1931
1932 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
1933 length = (u64) blk_rq_bytes(rq);
1934
1935 if (!length) {
1936 dout("%s: zero-length request\n", __func__);
1937 __blk_end_request_all(rq, 0);
1938 continue;
1939 }
1940
1941 spin_unlock_irq(q->queue_lock);
1942
1943 /* Disallow writes to a read-only device */
1944
1945 if (write_request) {
1946 result = -EROFS;
1947 if (read_only)
1948 goto end_request;
1949 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
1950 }
1951
1952 /*
1953 * Quit early if the mapped snapshot no longer
1954 * exists. It's still possible the snapshot will
1955 * have disappeared by the time our request arrives
1956 * at the osd, but there's no sense in sending it if
1957 * we already know.
1958 */
1959 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
1960 dout("request for non-existent snapshot");
1961 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
1962 result = -ENXIO;
1963 goto end_request;
1964 }
1965
1966 result = -EINVAL;
1967 if (WARN_ON(offset && length > U64_MAX - offset + 1))
1968 goto end_request; /* Shouldn't happen */
1969
1970 result = -ENOMEM;
1971 img_request = rbd_img_request_create(rbd_dev, offset, length,
1972 write_request);
1973 if (!img_request)
1974 goto end_request;
1975
1976 img_request->rq = rq;
1977
1978 result = rbd_img_request_fill_bio(img_request, rq->bio);
1979 if (!result)
1980 result = rbd_img_request_submit(img_request);
1981 if (result)
1982 rbd_img_request_put(img_request);
1983 end_request:
1984 spin_lock_irq(q->queue_lock);
1985 if (result < 0) {
1986 rbd_warn(rbd_dev, "obj_request %s result %d\n",
1987 write_request ? "write" : "read", result);
1988 __blk_end_request_all(rq, result);
1989 }
1990 }
1991 }
1992
1993 /*
1994  * A queue callback.  Makes sure that we don't create a bio that spans
1995  * multiple osd objects.  One exception would be single-page bios,
1996  * which we handle later at bio_chain_clone_range().
1997 */
1998 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
1999 struct bio_vec *bvec)
2000 {
2001 struct rbd_device *rbd_dev = q->queuedata;
2002 sector_t sector_offset;
2003 sector_t sectors_per_obj;
2004 sector_t obj_sector_offset;
2005 int ret;
2006
2007 /*
2008 	 * Convert the partition-relative bio start sector into an
2009 	 * offset relative to the enclosing device, then find how far
2010 	 * into its rbd object that offset falls.
2011 */
2012 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2013 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2014 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
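	/*
	 * For example, assuming the default object order of 22 (4 MiB
	 * objects): sectors_per_obj = 1 << (22 - 9) = 8192.  A bio
	 * starting at device sector 12288 is 12288 & 8191 = 4096
	 * sectors (2 MiB) into its object, leaving at most 2 MiB
	 * before the object boundary.
	 */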
2015
2016 /*
2017 * Compute the number of bytes from that offset to the end
2018 * of the object. Account for what's already used by the bio.
2019 */
2020 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2021 if (ret > bmd->bi_size)
2022 ret -= bmd->bi_size;
2023 else
2024 ret = 0;
2025
2026 /*
2027 * Don't send back more than was asked for. And if the bio
2028 * was empty, let the whole thing through because: "Note
2029 * that a block device *must* allow a single page to be
2030 * added to an empty bio."
2031 */
2032 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2033 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2034 ret = (int) bvec->bv_len;
2035
2036 return ret;
2037 }
2038
2039 static void rbd_free_disk(struct rbd_device *rbd_dev)
2040 {
2041 struct gendisk *disk = rbd_dev->disk;
2042
2043 if (!disk)
2044 return;
2045
2046 if (disk->flags & GENHD_FL_UP)
2047 del_gendisk(disk);
2048 if (disk->queue)
2049 blk_cleanup_queue(disk->queue);
2050 put_disk(disk);
2051 }
2052
2053 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2054 const char *object_name,
2055 u64 offset, u64 length,
2056 char *buf, u64 *version)
2058 {
2059 struct ceph_osd_req_op *op;
2060 struct rbd_obj_request *obj_request;
2061 struct ceph_osd_client *osdc;
2062 struct page **pages = NULL;
2063 u32 page_count;
2064 size_t size;
2065 int ret;
2066
2067 page_count = (u32) calc_pages_for(offset, length);
2068 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2069 if (IS_ERR(pages))
2070 		return PTR_ERR(pages);
2071
2072 ret = -ENOMEM;
2073 obj_request = rbd_obj_request_create(object_name, offset, length,
2074 OBJ_REQUEST_PAGES);
2075 if (!obj_request)
2076 goto out;
2077
2078 obj_request->pages = pages;
2079 obj_request->page_count = page_count;
2080
2081 op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length);
2082 if (!op)
2083 goto out;
2084 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2085 obj_request, op);
2086 rbd_osd_req_op_destroy(op);
2087 if (!obj_request->osd_req)
2088 goto out;
2089
2090 osdc = &rbd_dev->rbd_client->client->osdc;
2091 ret = rbd_obj_request_submit(osdc, obj_request);
2092 if (ret)
2093 goto out;
2094 ret = rbd_obj_request_wait(obj_request);
2095 if (ret)
2096 goto out;
2097
2098 ret = obj_request->result;
2099 if (ret < 0)
2100 goto out;
2101
2102 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2103 size = (size_t) obj_request->xferred;
2104 ceph_copy_from_page_vector(pages, buf, 0, size);
2105 rbd_assert(size <= (size_t) INT_MAX);
2106 ret = (int) size;
2107 if (version)
2108 *version = obj_request->version;
2109 out:
2110 if (obj_request)
2111 rbd_obj_request_put(obj_request);
2112 else
2113 ceph_release_page_vector(pages, page_count);
2114
2115 return ret;
2116 }
2117
2118 /*
2119 * Read the complete header for the given rbd device.
2120 *
2121 * Returns a pointer to a dynamically-allocated buffer containing
2122 * the complete and validated header. Caller can pass the address
2123 * of a variable that will be filled in with the version of the
2124 * header object at the time it was read.
2125 *
2126 * Returns a pointer-coded errno if a failure occurs.
2127 */
2128 static struct rbd_image_header_ondisk *
2129 rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
2130 {
2131 struct rbd_image_header_ondisk *ondisk = NULL;
2132 u32 snap_count = 0;
2133 u64 names_size = 0;
2134 u32 want_count;
2135 int ret;
2136
2137 /*
2138 * The complete header will include an array of its 64-bit
2139 * snapshot ids, followed by the names of those snapshots as
2140 * a contiguous block of NUL-terminated strings. Note that
2141 * the number of snapshots could change by the time we read
2142 * it in, in which case we re-read it.
2143 */
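	/*
	 * For example, the first pass (snap_count and names_size both
	 * zero) fetches only the fixed-size part of the header; if that
	 * reports three snapshots, the next pass re-reads with room for
	 * three snapshot ids plus their names, and so on until the
	 * count read back matches the count allocated for.
	 */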
2144 do {
2145 size_t size;
2146
2147 kfree(ondisk);
2148
2149 size = sizeof (*ondisk);
2150 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2151 size += names_size;
2152 ondisk = kmalloc(size, GFP_KERNEL);
2153 if (!ondisk)
2154 return ERR_PTR(-ENOMEM);
2155
2156 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2157 0, size,
2158 (char *) ondisk, version);
2159 if (ret < 0)
2160 goto out_err;
2161 if (WARN_ON((size_t) ret < size)) {
2162 			rbd_warn(rbd_dev, "short header read (want %zu got %d)",
2163 				size, ret);
2164 			ret = -ENXIO;
2165 goto out_err;
2166 }
2167 if (!rbd_dev_ondisk_valid(ondisk)) {
2168 ret = -ENXIO;
2169 rbd_warn(rbd_dev, "invalid header");
2170 goto out_err;
2171 }
2172
2173 names_size = le64_to_cpu(ondisk->snap_names_len);
2174 want_count = snap_count;
2175 snap_count = le32_to_cpu(ondisk->snap_count);
2176 } while (snap_count != want_count);
2177
2178 return ondisk;
2179
2180 out_err:
2181 kfree(ondisk);
2182
2183 return ERR_PTR(ret);
2184 }
2185
2186 /*
2187  * reload the on-disk header
2188 */
2189 static int rbd_read_header(struct rbd_device *rbd_dev,
2190 struct rbd_image_header *header)
2191 {
2192 struct rbd_image_header_ondisk *ondisk;
2193 u64 ver = 0;
2194 int ret;
2195
2196 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
2197 if (IS_ERR(ondisk))
2198 return PTR_ERR(ondisk);
2199 ret = rbd_header_from_disk(header, ondisk);
2200 if (ret >= 0)
2201 header->obj_version = ver;
2202 kfree(ondisk);
2203
2204 return ret;
2205 }
2206
2207 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
2208 {
2209 struct rbd_snap *snap;
2210 struct rbd_snap *next;
2211
2212 list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
2213 rbd_remove_snap_dev(snap);
2214 }
2215
2216 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2217 {
2218 sector_t size;
2219
2220 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
2221 return;
2222
2223 size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
2224 dout("setting size to %llu sectors", (unsigned long long) size);
2225 rbd_dev->mapping.size = (u64) size;
2226 set_capacity(rbd_dev->disk, size);
2227 }
2228
2229 /*
2230 * only read the first part of the ondisk header, without the snaps info
2231 */
2232 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
2233 {
2234 int ret;
2235 struct rbd_image_header h;
2236
2237 ret = rbd_read_header(rbd_dev, &h);
2238 if (ret < 0)
2239 return ret;
2240
2241 down_write(&rbd_dev->header_rwsem);
2242
2243 /* Update image size, and check for resize of mapped image */
2244 rbd_dev->header.image_size = h.image_size;
2245 rbd_update_mapping_size(rbd_dev);
2246
2247 /* rbd_dev->header.object_prefix shouldn't change */
2248 kfree(rbd_dev->header.snap_sizes);
2249 kfree(rbd_dev->header.snap_names);
2250 /* osd requests may still refer to snapc */
2251 ceph_put_snap_context(rbd_dev->header.snapc);
2252
2253 if (hver)
2254 *hver = h.obj_version;
2255 rbd_dev->header.obj_version = h.obj_version;
2256 rbd_dev->header.image_size = h.image_size;
2257 rbd_dev->header.snapc = h.snapc;
2258 rbd_dev->header.snap_names = h.snap_names;
2259 rbd_dev->header.snap_sizes = h.snap_sizes;
2260 /* Free the extra copy of the object prefix */
2261 WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
2262 kfree(h.object_prefix);
2263
2264 ret = rbd_dev_snaps_update(rbd_dev);
2265 if (!ret)
2266 ret = rbd_dev_snaps_register(rbd_dev);
2267
2268 up_write(&rbd_dev->header_rwsem);
2269
2270 return ret;
2271 }
2272
2273 static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
2274 {
2275 int ret;
2276
2277 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
2278 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
2279 if (rbd_dev->image_format == 1)
2280 ret = rbd_dev_v1_refresh(rbd_dev, hver);
2281 else
2282 ret = rbd_dev_v2_refresh(rbd_dev, hver);
2283 mutex_unlock(&ctl_mutex);
2284
2285 return ret;
2286 }
2287
2288 static int rbd_init_disk(struct rbd_device *rbd_dev)
2289 {
2290 struct gendisk *disk;
2291 struct request_queue *q;
2292 u64 segment_size;
2293
2294 /* create gendisk info */
2295 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
2296 if (!disk)
2297 return -ENOMEM;
2298
2299 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
2300 rbd_dev->dev_id);
2301 disk->major = rbd_dev->major;
2302 disk->first_minor = 0;
2303 disk->fops = &rbd_bd_ops;
2304 disk->private_data = rbd_dev;
2305
2306 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
2307 if (!q)
2308 goto out_disk;
2309
2310 /* We use the default size, but let's be explicit about it. */
2311 blk_queue_physical_block_size(q, SECTOR_SIZE);
2312
2313 /* set io sizes to object size */
2314 segment_size = rbd_obj_bytes(&rbd_dev->header);
2315 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
2316 blk_queue_max_segment_size(q, segment_size);
2317 blk_queue_io_min(q, segment_size);
2318 blk_queue_io_opt(q, segment_size);
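	/*
	 * E.g., with the default 4 MiB objects this caps requests at
	 * 8192 sectors and aligns the I/O size hints to object
	 * boundaries.
	 */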
2319
2320 blk_queue_merge_bvec(q, rbd_merge_bvec);
2321 disk->queue = q;
2322
2323 q->queuedata = rbd_dev;
2324
2325 rbd_dev->disk = disk;
2326
2327 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
2328
2329 return 0;
2330 out_disk:
2331 put_disk(disk);
2332
2333 return -ENOMEM;
2334 }
2335
2336 /*
2337 sysfs
2338 */
2339
2340 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
2341 {
2342 return container_of(dev, struct rbd_device, dev);
2343 }
2344
2345 static ssize_t rbd_size_show(struct device *dev,
2346 struct device_attribute *attr, char *buf)
2347 {
2348 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2349 sector_t size;
2350
2351 down_read(&rbd_dev->header_rwsem);
2352 size = get_capacity(rbd_dev->disk);
2353 up_read(&rbd_dev->header_rwsem);
2354
2355 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
2356 }
2357
2358 /*
2359 * Note this shows the features for whatever's mapped, which is not
2360 * necessarily the base image.
2361 */
2362 static ssize_t rbd_features_show(struct device *dev,
2363 struct device_attribute *attr, char *buf)
2364 {
2365 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2366
2367 return sprintf(buf, "0x%016llx\n",
2368 (unsigned long long) rbd_dev->mapping.features);
2369 }
2370
2371 static ssize_t rbd_major_show(struct device *dev,
2372 struct device_attribute *attr, char *buf)
2373 {
2374 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2375
2376 return sprintf(buf, "%d\n", rbd_dev->major);
2377 }
2378
2379 static ssize_t rbd_client_id_show(struct device *dev,
2380 struct device_attribute *attr, char *buf)
2381 {
2382 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2383
2384 return sprintf(buf, "client%lld\n",
2385 ceph_client_id(rbd_dev->rbd_client->client));
2386 }
2387
2388 static ssize_t rbd_pool_show(struct device *dev,
2389 struct device_attribute *attr, char *buf)
2390 {
2391 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2392
2393 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
2394 }
2395
2396 static ssize_t rbd_pool_id_show(struct device *dev,
2397 struct device_attribute *attr, char *buf)
2398 {
2399 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2400
2401 return sprintf(buf, "%llu\n",
2402 (unsigned long long) rbd_dev->spec->pool_id);
2403 }
2404
2405 static ssize_t rbd_name_show(struct device *dev,
2406 struct device_attribute *attr, char *buf)
2407 {
2408 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2409
2410 if (rbd_dev->spec->image_name)
2411 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
2412
2413 return sprintf(buf, "(unknown)\n");
2414 }
2415
2416 static ssize_t rbd_image_id_show(struct device *dev,
2417 struct device_attribute *attr, char *buf)
2418 {
2419 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2420
2421 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
2422 }
2423
2424 /*
2425 * Shows the name of the currently-mapped snapshot (or
2426 * RBD_SNAP_HEAD_NAME for the base image).
2427 */
2428 static ssize_t rbd_snap_show(struct device *dev,
2429 struct device_attribute *attr,
2430 char *buf)
2431 {
2432 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2433
2434 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
2435 }
2436
2437 /*
2438 * For an rbd v2 image, shows the pool id, image id, and snapshot id
2439 * for the parent image. If there is no parent, simply shows
2440 * "(no parent image)".
2441 */
2442 static ssize_t rbd_parent_show(struct device *dev,
2443 struct device_attribute *attr,
2444 char *buf)
2445 {
2446 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2447 struct rbd_spec *spec = rbd_dev->parent_spec;
2448 int count;
2449 char *bufp = buf;
2450
2451 if (!spec)
2452 return sprintf(buf, "(no parent image)\n");
2453
2454 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
2455 (unsigned long long) spec->pool_id, spec->pool_name);
2456 if (count < 0)
2457 return count;
2458 bufp += count;
2459
2460 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
2461 spec->image_name ? spec->image_name : "(unknown)");
2462 if (count < 0)
2463 return count;
2464 bufp += count;
2465
2466 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
2467 (unsigned long long) spec->snap_id, spec->snap_name);
2468 if (count < 0)
2469 return count;
2470 bufp += count;
2471
2472 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
2473 if (count < 0)
2474 return count;
2475 bufp += count;
2476
2477 return (ssize_t) (bufp - buf);
2478 }
2479
2480 static ssize_t rbd_image_refresh(struct device *dev,
2481 struct device_attribute *attr,
2482 const char *buf,
2483 size_t size)
2484 {
2485 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
2486 int ret;
2487
2488 ret = rbd_dev_refresh(rbd_dev, NULL);
2489
2490 return ret < 0 ? ret : size;
2491 }
2492
2493 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
2494 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
2495 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
2496 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
2497 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
2498 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
2499 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
2500 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
2501 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
2502 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
2503 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
2504
2505 static struct attribute *rbd_attrs[] = {
2506 &dev_attr_size.attr,
2507 &dev_attr_features.attr,
2508 &dev_attr_major.attr,
2509 &dev_attr_client_id.attr,
2510 &dev_attr_pool.attr,
2511 &dev_attr_pool_id.attr,
2512 &dev_attr_name.attr,
2513 &dev_attr_image_id.attr,
2514 &dev_attr_current_snap.attr,
2515 &dev_attr_parent.attr,
2516 &dev_attr_refresh.attr,
2517 NULL
2518 };
2519
2520 static struct attribute_group rbd_attr_group = {
2521 .attrs = rbd_attrs,
2522 };
2523
2524 static const struct attribute_group *rbd_attr_groups[] = {
2525 &rbd_attr_group,
2526 NULL
2527 };
2528
2529 static void rbd_sysfs_dev_release(struct device *dev)
2530 {
2531 }
2532
2533 static struct device_type rbd_device_type = {
2534 .name = "rbd",
2535 .groups = rbd_attr_groups,
2536 .release = rbd_sysfs_dev_release,
2537 };
2538
2539
2540 /*
2541 sysfs - snapshots
2542 */
2543
2544 static ssize_t rbd_snap_size_show(struct device *dev,
2545 struct device_attribute *attr,
2546 char *buf)
2547 {
2548 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2549
2550 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
2551 }
2552
2553 static ssize_t rbd_snap_id_show(struct device *dev,
2554 struct device_attribute *attr,
2555 char *buf)
2556 {
2557 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2558
2559 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
2560 }
2561
2562 static ssize_t rbd_snap_features_show(struct device *dev,
2563 struct device_attribute *attr,
2564 char *buf)
2565 {
2566 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2567
2568 return sprintf(buf, "0x%016llx\n",
2569 (unsigned long long) snap->features);
2570 }
2571
2572 static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
2573 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
2574 static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);
2575
2576 static struct attribute *rbd_snap_attrs[] = {
2577 &dev_attr_snap_size.attr,
2578 &dev_attr_snap_id.attr,
2579 &dev_attr_snap_features.attr,
2580 NULL,
2581 };
2582
2583 static struct attribute_group rbd_snap_attr_group = {
2584 .attrs = rbd_snap_attrs,
2585 };
2586
2587 static void rbd_snap_dev_release(struct device *dev)
2588 {
2589 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2590 kfree(snap->name);
2591 kfree(snap);
2592 }
2593
2594 static const struct attribute_group *rbd_snap_attr_groups[] = {
2595 &rbd_snap_attr_group,
2596 NULL
2597 };
2598
2599 static struct device_type rbd_snap_device_type = {
2600 .groups = rbd_snap_attr_groups,
2601 .release = rbd_snap_dev_release,
2602 };
2603
2604 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
2605 {
2606 kref_get(&spec->kref);
2607
2608 return spec;
2609 }
2610
2611 static void rbd_spec_free(struct kref *kref);
2612 static void rbd_spec_put(struct rbd_spec *spec)
2613 {
2614 if (spec)
2615 kref_put(&spec->kref, rbd_spec_free);
2616 }
2617
2618 static struct rbd_spec *rbd_spec_alloc(void)
2619 {
2620 struct rbd_spec *spec;
2621
2622 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
2623 if (!spec)
2624 return NULL;
2625 kref_init(&spec->kref);
2626
2627 rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */
2628
2629 return spec;
2630 }
2631
2632 static void rbd_spec_free(struct kref *kref)
2633 {
2634 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
2635
2636 kfree(spec->pool_name);
2637 kfree(spec->image_id);
2638 kfree(spec->image_name);
2639 kfree(spec->snap_name);
2640 kfree(spec);
2641 }
2642
2643 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
2644 struct rbd_spec *spec)
2645 {
2646 struct rbd_device *rbd_dev;
2647
2648 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
2649 if (!rbd_dev)
2650 return NULL;
2651
2652 spin_lock_init(&rbd_dev->lock);
2653 rbd_dev->flags = 0;
2654 INIT_LIST_HEAD(&rbd_dev->node);
2655 INIT_LIST_HEAD(&rbd_dev->snaps);
2656 init_rwsem(&rbd_dev->header_rwsem);
2657
2658 rbd_dev->spec = spec;
2659 rbd_dev->rbd_client = rbdc;
2660
2661 /* Initialize the layout used for all rbd requests */
2662
2663 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2664 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
2665 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
2666 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
2667
2668 return rbd_dev;
2669 }
2670
2671 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
2672 {
2673 rbd_spec_put(rbd_dev->parent_spec);
2674 kfree(rbd_dev->header_name);
2675 rbd_put_client(rbd_dev->rbd_client);
2676 rbd_spec_put(rbd_dev->spec);
2677 kfree(rbd_dev);
2678 }
2679
2680 static bool rbd_snap_registered(struct rbd_snap *snap)
2681 {
2682 bool ret = snap->dev.type == &rbd_snap_device_type;
2683 bool reg = device_is_registered(&snap->dev);
2684
2685 rbd_assert(!ret ^ reg);
2686
2687 return ret;
2688 }
2689
2690 static void rbd_remove_snap_dev(struct rbd_snap *snap)
2691 {
2692 list_del(&snap->node);
2693 if (device_is_registered(&snap->dev))
2694 device_unregister(&snap->dev);
2695 }
2696
2697 static int rbd_register_snap_dev(struct rbd_snap *snap,
2698 struct device *parent)
2699 {
2700 struct device *dev = &snap->dev;
2701 int ret;
2702
2703 dev->type = &rbd_snap_device_type;
2704 dev->parent = parent;
2705 dev->release = rbd_snap_dev_release;
2706 dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name);
2707 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2708
2709 ret = device_register(dev);
2710
2711 return ret;
2712 }
2713
2714 static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
2715 const char *snap_name,
2716 u64 snap_id, u64 snap_size,
2717 u64 snap_features)
2718 {
2719 struct rbd_snap *snap;
2720 int ret;
2721
2722 snap = kzalloc(sizeof (*snap), GFP_KERNEL);
2723 if (!snap)
2724 return ERR_PTR(-ENOMEM);
2725
2726 ret = -ENOMEM;
2727 snap->name = kstrdup(snap_name, GFP_KERNEL);
2728 if (!snap->name)
2729 goto err;
2730
2731 snap->id = snap_id;
2732 snap->size = snap_size;
2733 snap->features = snap_features;
2734
2735 return snap;
2736
2737 err:
2738 kfree(snap->name);
2739 kfree(snap);
2740
2741 return ERR_PTR(ret);
2742 }
2743
2744 static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
2745 u64 *snap_size, u64 *snap_features)
2746 {
2747 char *snap_name;
2748
2749 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
2750
2751 *snap_size = rbd_dev->header.snap_sizes[which];
2752 *snap_features = 0; /* No features for v1 */
2753
2754 /* Skip over names until we find the one we are looking for */
2755
2756 snap_name = rbd_dev->header.snap_names;
2757 while (which--)
2758 snap_name += strlen(snap_name) + 1;
2759
2760 return snap_name;
2761 }
2762
2763 /*
2764 * Get the size and object order for an image snapshot, or if
2765 * snap_id is CEPH_NOSNAP, gets this information for the base
2766 * image.
2767 */
2768 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
2769 u8 *order, u64 *snap_size)
2770 {
2771 __le64 snapid = cpu_to_le64(snap_id);
2772 int ret;
2773 struct {
2774 u8 order;
2775 __le64 size;
2776 } __attribute__ ((packed)) size_buf = { 0 };
2777
2778 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2779 "rbd", "get_size",
2780 (char *) &snapid, sizeof (snapid),
2781 (char *) &size_buf, sizeof (size_buf), NULL);
2782 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2783 if (ret < 0)
2784 return ret;
2785
2786 *order = size_buf.order;
2787 *snap_size = le64_to_cpu(size_buf.size);
2788
2789 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
2790 (unsigned long long) snap_id, (unsigned int) *order,
2791 (unsigned long long) *snap_size);
2792
2793 return 0;
2794 }
2795
2796 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2797 {
2798 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2799 &rbd_dev->header.obj_order,
2800 &rbd_dev->header.image_size);
2801 }
2802
2803 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2804 {
2805 void *reply_buf;
2806 int ret;
2807 void *p;
2808
2809 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2810 if (!reply_buf)
2811 return -ENOMEM;
2812
2813 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2814 "rbd", "get_object_prefix",
2815 NULL, 0,
2816 reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL);
2817 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2818 if (ret < 0)
2819 goto out;
2820
2821 p = reply_buf;
2822 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2823 p + RBD_OBJ_PREFIX_LEN_MAX,
2824 NULL, GFP_NOIO);
2825
2826 if (IS_ERR(rbd_dev->header.object_prefix)) {
2827 ret = PTR_ERR(rbd_dev->header.object_prefix);
2828 rbd_dev->header.object_prefix = NULL;
2829 } else {
2830 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
2831 }
2832
2833 out:
2834 kfree(reply_buf);
2835
2836 return ret;
2837 }
2838
2839 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2840 u64 *snap_features)
2841 {
2842 __le64 snapid = cpu_to_le64(snap_id);
2843 struct {
2844 __le64 features;
2845 __le64 incompat;
2846 } features_buf = { 0 };
2847 u64 incompat;
2848 int ret;
2849
2850 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2851 "rbd", "get_features",
2852 (char *) &snapid, sizeof (snapid),
2853 (char *) &features_buf, sizeof (features_buf),
2854 NULL);
2855 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2856 if (ret < 0)
2857 return ret;
2858
2859 incompat = le64_to_cpu(features_buf.incompat);
2860 if (incompat & ~RBD_FEATURES_ALL)
2861 return -ENXIO;
2862
2863 *snap_features = le64_to_cpu(features_buf.features);
2864
2865 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2866 (unsigned long long) snap_id,
2867 (unsigned long long) *snap_features,
2868 (unsigned long long) le64_to_cpu(features_buf.incompat));
2869
2870 return 0;
2871 }
2872
2873 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2874 {
2875 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2876 &rbd_dev->header.features);
2877 }
2878
2879 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
2880 {
2881 struct rbd_spec *parent_spec;
2882 size_t size;
2883 void *reply_buf = NULL;
2884 __le64 snapid;
2885 void *p;
2886 void *end;
2887 char *image_id;
2888 u64 overlap;
2889 int ret;
2890
2891 parent_spec = rbd_spec_alloc();
2892 if (!parent_spec)
2893 return -ENOMEM;
2894
2895 size = sizeof (__le64) + /* pool_id */
2896 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
2897 sizeof (__le64) + /* snap_id */
2898 sizeof (__le64); /* overlap */
2899 reply_buf = kmalloc(size, GFP_KERNEL);
2900 if (!reply_buf) {
2901 ret = -ENOMEM;
2902 goto out_err;
2903 }
2904
2905 snapid = cpu_to_le64(CEPH_NOSNAP);
2906 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
2907 "rbd", "get_parent",
2908 (char *) &snapid, sizeof (snapid),
2909 (char *) reply_buf, size, NULL);
2910 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
2911 if (ret < 0)
2912 goto out_err;
2913
2914 ret = -ERANGE;
2915 p = reply_buf;
2916 end = (char *) reply_buf + size;
2917 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
2918 if (parent_spec->pool_id == CEPH_NOPOOL)
2919 goto out; /* No parent? No problem. */
2920
2921 /* The ceph file layout needs to fit pool id in 32 bits */
2922
2923 ret = -EIO;
2924 if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX))
2925 goto out;
2926
2927 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
2928 if (IS_ERR(image_id)) {
2929 ret = PTR_ERR(image_id);
2930 goto out_err;
2931 }
2932 parent_spec->image_id = image_id;
2933 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
2934 ceph_decode_64_safe(&p, end, overlap, out_err);
2935
2936 rbd_dev->parent_overlap = overlap;
2937 rbd_dev->parent_spec = parent_spec;
2938 parent_spec = NULL; /* rbd_dev now owns this */
2939 out:
2940 ret = 0;
2941 out_err:
2942 kfree(reply_buf);
2943 rbd_spec_put(parent_spec);
2944
2945 return ret;
2946 }
2947
2948 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
2949 {
2950 size_t image_id_size;
2951 char *image_id;
2952 void *p;
2953 void *end;
2954 size_t size;
2955 void *reply_buf = NULL;
2956 size_t len = 0;
2957 char *image_name = NULL;
2958 int ret;
2959
2960 rbd_assert(!rbd_dev->spec->image_name);
2961
2962 len = strlen(rbd_dev->spec->image_id);
2963 image_id_size = sizeof (__le32) + len;
2964 image_id = kmalloc(image_id_size, GFP_KERNEL);
2965 if (!image_id)
2966 return NULL;
2967
2968 p = image_id;
2969 end = (char *) image_id + image_id_size;
2970 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len);
2971
2972 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
2973 reply_buf = kmalloc(size, GFP_KERNEL);
2974 if (!reply_buf)
2975 goto out;
2976
2977 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
2978 "rbd", "dir_get_name",
2979 image_id, image_id_size,
2980 (char *) reply_buf, size, NULL);
2981 if (ret < 0)
2982 goto out;
2983 p = reply_buf;
2984 end = (char *) reply_buf + size;
2985 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
2986 if (IS_ERR(image_name))
2987 image_name = NULL;
2988 else
2989 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
2990 out:
2991 kfree(reply_buf);
2992 kfree(image_id);
2993
2994 return image_name;
2995 }
2996
2997 /*
2998 * When a parent image gets probed, we only have the pool, image,
2999 * and snapshot ids but not the names of any of them. This call
3000 * is made later to fill in those names. It has to be done after
3001 * rbd_dev_snaps_update() has completed because some of the
3002 * information (in particular, snapshot name) is not available
3003 * until then.
3004 */
3005 static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev)
3006 {
3007 struct ceph_osd_client *osdc;
3008 const char *name;
3009 void *reply_buf = NULL;
3010 int ret;
3011
3012 if (rbd_dev->spec->pool_name)
3013 return 0; /* Already have the names */
3014
3015 /* Look up the pool name */
3016
3017 osdc = &rbd_dev->rbd_client->client->osdc;
3018 name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id);
3019 if (!name) {
3020 rbd_warn(rbd_dev, "there is no pool with id %llu",
3021 rbd_dev->spec->pool_id); /* Really a BUG() */
3022 return -EIO;
3023 }
3024
3025 rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL);
3026 if (!rbd_dev->spec->pool_name)
3027 return -ENOMEM;
3028
3029 /* Fetch the image name; tolerate failure here */
3030
3031 name = rbd_dev_image_name(rbd_dev);
3032 if (name)
3033 rbd_dev->spec->image_name = (char *) name;
3034 else
3035 rbd_warn(rbd_dev, "unable to get image name");
3036
3037 /* Look up the snapshot name. */
3038
3039 name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id);
3040 if (!name) {
3041 rbd_warn(rbd_dev, "no snapshot with id %llu",
3042 rbd_dev->spec->snap_id); /* Really a BUG() */
3043 ret = -EIO;
3044 goto out_err;
3045 }
3046 	ret = -ENOMEM;
3047 	rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL);
3048 	if (!rbd_dev->spec->snap_name)
 		goto out_err;
3049
3050 return 0;
3051 out_err:
3052 kfree(reply_buf);
3053 kfree(rbd_dev->spec->pool_name);
3054 rbd_dev->spec->pool_name = NULL;
3055
3056 return ret;
3057 }
3058
3059 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
3060 {
3061 size_t size;
3062 int ret;
3063 void *reply_buf;
3064 void *p;
3065 void *end;
3066 u64 seq;
3067 u32 snap_count;
3068 struct ceph_snap_context *snapc;
3069 u32 i;
3070
3071 /*
3072 * We'll need room for the seq value (maximum snapshot id),
3073 * snapshot count, and array of that many snapshot ids.
3074 * For now we have a fixed upper limit on the number we're
3075 * prepared to receive.
3076 */
3077 size = sizeof (__le64) + sizeof (__le32) +
3078 RBD_MAX_SNAP_COUNT * sizeof (__le64);
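	/*
	 * With RBD_MAX_SNAP_COUNT of 510 this works out to
	 * 8 + 4 + 510 * 8 = 4092 bytes, so the reply fits in a
	 * single 4 KB page.
	 */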
3079 reply_buf = kzalloc(size, GFP_KERNEL);
3080 if (!reply_buf)
3081 return -ENOMEM;
3082
3083 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3084 "rbd", "get_snapcontext",
3085 NULL, 0,
3086 reply_buf, size, ver);
3087 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3088 if (ret < 0)
3089 goto out;
3090
3091 ret = -ERANGE;
3092 p = reply_buf;
3093 end = (char *) reply_buf + size;
3094 ceph_decode_64_safe(&p, end, seq, out);
3095 ceph_decode_32_safe(&p, end, snap_count, out);
3096
3097 /*
3098 * Make sure the reported number of snapshot ids wouldn't go
3099 * beyond the end of our buffer. But before checking that,
3100 * make sure the computed size of the snapshot context we
3101 * allocate is representable in a size_t.
3102 */
3103 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3104 / sizeof (u64)) {
3105 ret = -EINVAL;
3106 goto out;
3107 }
3108 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3109 goto out;
3110
3111 size = sizeof (struct ceph_snap_context) +
3112 snap_count * sizeof (snapc->snaps[0]);
3113 snapc = kmalloc(size, GFP_KERNEL);
3114 if (!snapc) {
3115 ret = -ENOMEM;
3116 goto out;
3117 }
3118
3119 atomic_set(&snapc->nref, 1);
3120 snapc->seq = seq;
3121 snapc->num_snaps = snap_count;
3122 for (i = 0; i < snap_count; i++)
3123 snapc->snaps[i] = ceph_decode_64(&p);
3124
3125 rbd_dev->header.snapc = snapc;
3126
3127 dout(" snap context seq = %llu, snap_count = %u\n",
3128 (unsigned long long) seq, (unsigned int) snap_count);
3129 	ret = 0;
3130 out:
3131 	kfree(reply_buf);
3132 
3133 	return ret;
3134 }
3135
3136 static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3137 {
3138 size_t size;
3139 void *reply_buf;
3140 __le64 snap_id;
3141 int ret;
3142 void *p;
3143 void *end;
3144 char *snap_name;
3145
3146 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3147 reply_buf = kmalloc(size, GFP_KERNEL);
3148 if (!reply_buf)
3149 return ERR_PTR(-ENOMEM);
3150
3151 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3152 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3153 "rbd", "get_snapshot_name",
3154 (char *) &snap_id, sizeof (snap_id),
3155 reply_buf, size, NULL);
3156 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3157 if (ret < 0)
3158 goto out;
3159
3160 p = reply_buf;
3161 end = (char *) reply_buf + size;
3162 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3163 if (IS_ERR(snap_name)) {
3164 ret = PTR_ERR(snap_name);
3165 goto out;
3166 } else {
3167 dout(" snap_id 0x%016llx snap_name = %s\n",
3168 (unsigned long long) le64_to_cpu(snap_id), snap_name);
3169 }
3170 kfree(reply_buf);
3171
3172 return snap_name;
3173 out:
3174 kfree(reply_buf);
3175
3176 return ERR_PTR(ret);
3177 }
3178
3179 static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3180 u64 *snap_size, u64 *snap_features)
3181 {
3182 u64 snap_id;
3183 u8 order;
3184 int ret;
3185
3186 snap_id = rbd_dev->header.snapc->snaps[which];
3187 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
3188 if (ret)
3189 return ERR_PTR(ret);
3190 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
3191 if (ret)
3192 return ERR_PTR(ret);
3193
3194 return rbd_dev_v2_snap_name(rbd_dev, which);
3195 }
3196
3197 static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3198 u64 *snap_size, u64 *snap_features)
3199 {
3200 if (rbd_dev->image_format == 1)
3201 return rbd_dev_v1_snap_info(rbd_dev, which,
3202 snap_size, snap_features);
3203 if (rbd_dev->image_format == 2)
3204 return rbd_dev_v2_snap_info(rbd_dev, which,
3205 snap_size, snap_features);
3206 return ERR_PTR(-EINVAL);
3207 }
3208
3209 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
3210 {
3211 int ret;
3212 __u8 obj_order;
3213
3214 down_write(&rbd_dev->header_rwsem);
3215
3216 /* Grab old order first, to see if it changes */
3217
3218 	obj_order = rbd_dev->header.obj_order;
3219 ret = rbd_dev_v2_image_size(rbd_dev);
3220 if (ret)
3221 goto out;
3222 if (rbd_dev->header.obj_order != obj_order) {
3223 ret = -EIO;
3224 goto out;
3225 }
3226 rbd_update_mapping_size(rbd_dev);
3227
3228 ret = rbd_dev_v2_snap_context(rbd_dev, hver);
3229 dout("rbd_dev_v2_snap_context returned %d\n", ret);
3230 if (ret)
3231 goto out;
3232 ret = rbd_dev_snaps_update(rbd_dev);
3233 dout("rbd_dev_snaps_update returned %d\n", ret);
3234 if (ret)
3235 goto out;
3236 ret = rbd_dev_snaps_register(rbd_dev);
3237 dout("rbd_dev_snaps_register returned %d\n", ret);
3238 out:
3239 up_write(&rbd_dev->header_rwsem);
3240
3241 return ret;
3242 }
3243
3244 /*
3245 * Scan the rbd device's current snapshot list and compare it to the
3246 * newly-received snapshot context. Remove any existing snapshots
3247 * not present in the new snapshot context. Add a new snapshot for
3248  * any snapshots in the snapshot context not in the current list.
3249 * And verify there are no changes to snapshots we already know
3250 * about.
3251 *
3252 * Assumes the snapshots in the snapshot context are sorted by
3253 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
3254 * are also maintained in that order.)
3255 */
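/*
 * For example, if the device's list holds snapshot ids { 8, 5, 2 }
 * and the new context reports { 8, 3, 2 }, the scan removes 5,
 * inserts 3 ahead of 2, and verifies that 8 and 2 are unchanged.
 */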
3256 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
3257 {
3258 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3259 const u32 snap_count = snapc->num_snaps;
3260 struct list_head *head = &rbd_dev->snaps;
3261 struct list_head *links = head->next;
3262 u32 index = 0;
3263
3264 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
3265 while (index < snap_count || links != head) {
3266 u64 snap_id;
3267 struct rbd_snap *snap;
3268 char *snap_name;
3269 u64 snap_size = 0;
3270 u64 snap_features = 0;
3271
3272 snap_id = index < snap_count ? snapc->snaps[index]
3273 : CEPH_NOSNAP;
3274 snap = links != head ? list_entry(links, struct rbd_snap, node)
3275 : NULL;
3276 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
3277
3278 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
3279 struct list_head *next = links->next;
3280
3281 /*
3282 * A previously-existing snapshot is not in
3283 * the new snap context.
3284 *
3285 * If the now missing snapshot is the one the
3286 * image is mapped to, clear its exists flag
3287 * so we can avoid sending any more requests
3288 * to it.
3289 */
3290 if (rbd_dev->spec->snap_id == snap->id)
3291 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3292 rbd_remove_snap_dev(snap);
3293 dout("%ssnap id %llu has been removed\n",
3294 rbd_dev->spec->snap_id == snap->id ?
3295 "mapped " : "",
3296 (unsigned long long) snap->id);
3297
3298 /* Done with this list entry; advance */
3299
3300 links = next;
3301 continue;
3302 }
3303
3304 snap_name = rbd_dev_snap_info(rbd_dev, index,
3305 &snap_size, &snap_features);
3306 if (IS_ERR(snap_name))
3307 return PTR_ERR(snap_name);
3308
3309 dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
3310 (unsigned long long) snap_id);
3311 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
3312 struct rbd_snap *new_snap;
3313
3314 /* We haven't seen this snapshot before */
3315
3316 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
3317 snap_id, snap_size, snap_features);
3318 if (IS_ERR(new_snap)) {
3319 int err = PTR_ERR(new_snap);
3320
3321 dout(" failed to add dev, error %d\n", err);
3322
3323 return err;
3324 }
3325
3326 /* New goes before existing, or at end of list */
3327
3328 dout(" added dev%s\n", snap ? "" : " at end\n");
3329 if (snap)
3330 list_add_tail(&new_snap->node, &snap->node);
3331 else
3332 list_add_tail(&new_snap->node, head);
3333 } else {
3334 /* Already have this one */
3335
3336 dout(" already present\n");
3337
3338 rbd_assert(snap->size == snap_size);
3339 rbd_assert(!strcmp(snap->name, snap_name));
3340 rbd_assert(snap->features == snap_features);
3341
3342 /* Done with this list entry; advance */
3343
3344 links = links->next;
3345 }
3346
3347 /* Advance to the next entry in the snapshot context */
3348
3349 index++;
3350 }
3351 dout("%s: done\n", __func__);
3352
3353 return 0;
3354 }
3355
3356 /*
3357 * Scan the list of snapshots and register the devices for any that
3358 * have not already been registered.
3359 */
3360 static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
3361 {
3362 struct rbd_snap *snap;
3363 int ret = 0;
3364
3365 dout("%s called\n", __func__);
3366 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
3367 return -EIO;
3368
3369 list_for_each_entry(snap, &rbd_dev->snaps, node) {
3370 if (!rbd_snap_registered(snap)) {
3371 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
3372 if (ret < 0)
3373 break;
3374 }
3375 }
3376 dout("%s: returning %d\n", __func__, ret);
3377
3378 return ret;
3379 }
3380
3381 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
3382 {
3383 struct device *dev;
3384 int ret;
3385
3386 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3387
3388 dev = &rbd_dev->dev;
3389 dev->bus = &rbd_bus_type;
3390 dev->type = &rbd_device_type;
3391 dev->parent = &rbd_root_dev;
3392 dev->release = rbd_dev_release;
3393 dev_set_name(dev, "%d", rbd_dev->dev_id);
3394 ret = device_register(dev);
3395
3396 mutex_unlock(&ctl_mutex);
3397
3398 return ret;
3399 }
3400
3401 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
3402 {
3403 device_unregister(&rbd_dev->dev);
3404 }
3405
3406 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
3407
3408 /*
3409 * Get a unique rbd identifier for the given new rbd_dev, and add
3410 * the rbd_dev to the global list. The minimum rbd id is 1.
3411 */
3412 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
3413 {
3414 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
3415
3416 spin_lock(&rbd_dev_list_lock);
3417 list_add_tail(&rbd_dev->node, &rbd_dev_list);
3418 spin_unlock(&rbd_dev_list_lock);
3419 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
3420 (unsigned long long) rbd_dev->dev_id);
3421 }
3422
3423 /*
3424 * Remove an rbd_dev from the global list, and record that its
3425 * identifier is no longer in use.
3426 */
3427 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
3428 {
3429 struct list_head *tmp;
3430 int rbd_id = rbd_dev->dev_id;
3431 int max_id;
3432
3433 rbd_assert(rbd_id > 0);
3434
3435 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
3436 (unsigned long long) rbd_dev->dev_id);
3437 spin_lock(&rbd_dev_list_lock);
3438 list_del_init(&rbd_dev->node);
3439
3440 /*
3441 * If the id being "put" is not the current maximum, there
3442 * is nothing special we need to do.
3443 */
3444 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
3445 spin_unlock(&rbd_dev_list_lock);
3446 return;
3447 }
3448
3449 /*
3450 * We need to update the current maximum id. Search the
3451 * list to find out what it is. We're more likely to find
3452 * the maximum at the end, so search the list backward.
3453 */
3454 max_id = 0;
3455 list_for_each_prev(tmp, &rbd_dev_list) {
3456 struct rbd_device *rbd_dev;
3457
3458 rbd_dev = list_entry(tmp, struct rbd_device, node);
3459 if (rbd_dev->dev_id > max_id)
3460 max_id = rbd_dev->dev_id;
3461 }
3462 spin_unlock(&rbd_dev_list_lock);
3463
3464 /*
3465 * The max id could have been updated by rbd_dev_id_get(), in
3466 * which case it now accurately reflects the new maximum.
3467 * Be careful not to overwrite the maximum value in that
3468 * case.
3469 */
3470 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
3471 dout(" max dev id has been reset\n");
3472 }
3473
3474 /*
3475 * Skips over white space at *buf, and updates *buf to point to the
3476 * first found non-space character (if any). Returns the length of
3477 * the token (string of non-white space characters) found. Note
3478 * that *buf must be terminated with '\0'.
3479 */
3480 static inline size_t next_token(const char **buf)
3481 {
3482 /*
3483 * These are the characters that produce nonzero for
3484 * isspace() in the "C" and "POSIX" locales.
3485 */
3486 const char *spaces = " \f\n\r\t\v";
3487
3488 *buf += strspn(*buf, spaces); /* Find start of token */
3489
3490 return strcspn(*buf, spaces); /* Return token length */
3491 }
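
/*
 * For example, with *buf pointing at "  rbd foo", next_token()
 * advances *buf past the leading spaces to "rbd foo" and returns 3,
 * the length of "rbd".
 */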
3492
3493 /*
3494 * Finds the next token in *buf, and if the provided token buffer is
3495 * big enough, copies the found token into it. The result, if
3496 * copied, is guaranteed to be terminated with '\0'. Note that *buf
3497 * must be terminated with '\0' on entry.
3498 *
3499 * Returns the length of the token found (not including the '\0').
3500 * Return value will be 0 if no token is found, and it will be >=
3501 * token_size if the token would not fit.
3502 *
3503 * The *buf pointer will be updated to point beyond the end of the
3504 * found token. Note that this occurs even if the token buffer is
3505 * too small to hold it.
3506 */
3507 static inline size_t copy_token(const char **buf,
3508 char *token,
3509 size_t token_size)
3510 {
3511 size_t len;
3512
3513 len = next_token(buf);
3514 if (len < token_size) {
3515 memcpy(token, *buf, len);
3516 *(token + len) = '\0';
3517 }
3518 *buf += len;
3519
3520 return len;
3521 }
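
/*
 * For example, copy_token(&buf, token, 8) with *buf at "pool image"
 * copies "pool" into token and returns 4.  With token_size 4 it
 * would return 4 without copying, because the terminating '\0'
 * would not fit.  Either way *buf is left pointing at " image".
 */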
3522
3523 /*
3524 * Finds the next token in *buf, dynamically allocates a buffer big
3525 * enough to hold a copy of it, and copies the token into the new
3526 * buffer. The copy is guaranteed to be terminated with '\0'. Note
3527 * that a duplicate buffer is created even for a zero-length token.
3528 *
3529 * Returns a pointer to the newly-allocated duplicate, or a null
3530 * pointer if memory for the duplicate was not available. If
3531 * the lenp argument is a non-null pointer, the length of the token
3532 * (not including the '\0') is returned in *lenp.
3533 *
3534 * If successful, the *buf pointer will be updated to point beyond
3535 * the end of the found token.
3536 *
3537 * Note: uses GFP_KERNEL for allocation.
3538 */
3539 static inline char *dup_token(const char **buf, size_t *lenp)
3540 {
3541 char *dup;
3542 size_t len;
3543
3544 len = next_token(buf);
3545 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
3546 if (!dup)
3547 return NULL;
3548 *(dup + len) = '\0';
3549 *buf += len;
3550
3551 if (lenp)
3552 *lenp = len;
3553
3554 return dup;
3555 }
3556
3557 /*
3558 * Parse the options provided for an "rbd add" (i.e., rbd image
3559 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
3560 * and the data written is passed here via a NUL-terminated buffer.
3561 * Returns 0 if successful or an error code otherwise.
3562 *
3563 * The information extracted from these options is recorded in
3564 * the other parameters which return dynamically-allocated
3565 * structures:
3566 * ceph_opts
3567 * The address of a pointer that will refer to a ceph options
3568 * structure. Caller must release the returned pointer using
3569 * ceph_destroy_options() when it is no longer needed.
3570 * rbd_opts
3571 * Address of an rbd options pointer. Fully initialized by
3572 * this function; caller must release with kfree().
3573 * spec
3574 * Address of an rbd image specification pointer. Fully
3575 * initialized by this function based on parsed options.
3576 * Caller must release with rbd_spec_put().
3577 *
3578 * The options passed take this form:
3579  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
3580 * where:
3581 * <mon_addrs>
3582 * A comma-separated list of one or more monitor addresses.
3583 * A monitor address is an ip address, optionally followed
3584 * by a port number (separated by a colon).
3585 * I.e.: ip1[:port1][,ip2[:port2]...]
3586 * <options>
3587 * A comma-separated list of ceph and/or rbd options.
3588 * <pool_name>
3589 * The name of the rados pool containing the rbd image.
3590 * <image_name>
3591 * The name of the image in that pool to map.
3592  *  <snap_name>
3593  *	An optional snapshot name.  If provided, the mapping will
3594  *	present data from the image at the time that snapshot was
3595  *	created.  The image head is used if no snapshot name is
3596  *	provided.  Snapshot mappings are always read-only.
3597 */
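/*
 * For example (all names below are illustrative only):
 *
 *   $ echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * maps the head of image "myimage" from pool "rbd", using the
 * monitor at 1.2.3.4:6789.
 */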
3598 static int rbd_add_parse_args(const char *buf,
3599 struct ceph_options **ceph_opts,
3600 struct rbd_options **opts,
3601 struct rbd_spec **rbd_spec)
3602 {
3603 size_t len;
3604 char *options;
3605 const char *mon_addrs;
3606 size_t mon_addrs_size;
3607 struct rbd_spec *spec = NULL;
3608 struct rbd_options *rbd_opts = NULL;
3609 struct ceph_options *copts;
3610 int ret;
3611
3612 /* The first four tokens are required */
3613
3614 len = next_token(&buf);
3615 if (!len) {
3616 rbd_warn(NULL, "no monitor address(es) provided");
3617 return -EINVAL;
3618 }
3619 mon_addrs = buf;
3620 mon_addrs_size = len + 1;
3621 buf += len;
3622
3623 ret = -EINVAL;
3624 options = dup_token(&buf, NULL);
3625 if (!options)
3626 return -ENOMEM;
3627 if (!*options) {
3628 rbd_warn(NULL, "no options provided");
3629 goto out_err;
3630 }
3631
3632 spec = rbd_spec_alloc();
3633 if (!spec)
3634 goto out_mem;
3635
3636 spec->pool_name = dup_token(&buf, NULL);
3637 if (!spec->pool_name)
3638 goto out_mem;
3639 if (!*spec->pool_name) {
3640 rbd_warn(NULL, "no pool name provided");
3641 goto out_err;
3642 }
3643
3644 spec->image_name = dup_token(&buf, NULL);
3645 if (!spec->image_name)
3646 goto out_mem;
3647 if (!*spec->image_name) {
3648 rbd_warn(NULL, "no image name provided");
3649 goto out_err;
3650 }
3651
3652 /*
3653 * Snapshot name is optional; default is to use "-"
3654 * (indicating the head/no snapshot).
3655 */
3656 len = next_token(&buf);
3657 if (!len) {
3658 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
3659 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
3660 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
3661 ret = -ENAMETOOLONG;
3662 goto out_err;
3663 }
3664 spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
3665 if (!spec->snap_name)
3666 goto out_mem;
3667 *(spec->snap_name + len) = '\0';
3668
3669 /* Initialize all rbd options to the defaults */
3670
3671 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
3672 if (!rbd_opts)
3673 goto out_mem;
3674
3675 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
3676
3677 copts = ceph_parse_options(options, mon_addrs,
3678 mon_addrs + mon_addrs_size - 1,
3679 parse_rbd_opts_token, rbd_opts);
3680 if (IS_ERR(copts)) {
3681 ret = PTR_ERR(copts);
3682 goto out_err;
3683 }
3684 kfree(options);
3685
3686 *ceph_opts = copts;
3687 *opts = rbd_opts;
3688 *rbd_spec = spec;
3689
3690 return 0;
3691 out_mem:
3692 ret = -ENOMEM;
3693 out_err:
3694 kfree(rbd_opts);
3695 rbd_spec_put(spec);
3696 kfree(options);
3697
3698 return ret;
3699 }
3700
3701 /*
3702 * An rbd format 2 image has a unique identifier, distinct from the
3703 * name given to it by the user. Internally, that identifier is
3704 * what's used to specify the names of objects related to the image.
3705 *
3706 * A special "rbd id" object is used to map an rbd image name to its
3707 * id. If that object doesn't exist, then there is no v2 rbd image
3708 * with the supplied name.
3709 *
3710 * This function will record the given rbd_dev's image_id field if
3711 * it can be determined, and in that case will return 0. If any
3712 * errors occur a negative errno will be returned and the rbd_dev's
3713 * image_id field will be unchanged (and should be NULL).
3714 */
3715 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
3716 {
3717 int ret;
3718 size_t size;
3719 char *object_name;
3720 void *response;
3721 void *p;
3722
3723 /*
3724 * When probing a parent image, the image id is already
3725 * known (and the image name likely is not). There's no
3726 * need to fetch the image id again in this case.
3727 */
3728 if (rbd_dev->spec->image_id)
3729 return 0;
3730
3731 /*
3732 * First, see if the format 2 image id file exists, and if
3733 * so, get the image's persistent id from it.
3734 */
3735 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
3736 object_name = kmalloc(size, GFP_NOIO);
3737 if (!object_name)
3738 return -ENOMEM;
3739 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
3740 dout("rbd id object name is %s\n", object_name);
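	/*
	 * (With RBD_ID_PREFIX "rbd_id.", an image named "foo" is
	 * looked up via the object "rbd_id.foo".)
	 */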
3741
3742 /* Response will be an encoded string, which includes a length */
3743
3744 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
3745 response = kzalloc(size, GFP_NOIO);
3746 if (!response) {
3747 ret = -ENOMEM;
3748 goto out;
3749 }
3750
3751 ret = rbd_obj_method_sync(rbd_dev, object_name,
3752 "rbd", "get_id",
3753 NULL, 0,
3754 response, RBD_IMAGE_ID_LEN_MAX, NULL);
3755 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3756 if (ret < 0)
3757 goto out;
3758
3759 p = response;
3760 rbd_dev->spec->image_id = ceph_extract_encoded_string(&p,
3761 p + RBD_IMAGE_ID_LEN_MAX,
3762 NULL, GFP_NOIO);
3763 if (IS_ERR(rbd_dev->spec->image_id)) {
3764 ret = PTR_ERR(rbd_dev->spec->image_id);
3765 rbd_dev->spec->image_id = NULL;
3766 } else {
3767 dout("image_id is %s\n", rbd_dev->spec->image_id);
3768 }
3769 out:
3770 kfree(response);
3771 kfree(object_name);
3772
3773 return ret;
3774 }
3775
3776 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
3777 {
3778 int ret;
3779 size_t size;
3780
3781 /* Version 1 images have no id; empty string is used */
3782
3783 rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL);
3784 if (!rbd_dev->spec->image_id)
3785 return -ENOMEM;
3786
3787 /* Record the header object name for this rbd image. */
3788
3789 size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX);
3790 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3791 if (!rbd_dev->header_name) {
3792 ret = -ENOMEM;
3793 goto out_err;
3794 }
3795 sprintf(rbd_dev->header_name, "%s%s",
3796 rbd_dev->spec->image_name, RBD_SUFFIX);
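	/* e.g., with RBD_SUFFIX ".rbd", image "foo" uses header "foo.rbd" */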
3797
3798 /* Populate rbd image metadata */
3799
3800 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
3801 if (ret < 0)
3802 goto out_err;
3803
3804 /* Version 1 images have no parent (no layering) */
3805
3806 rbd_dev->parent_spec = NULL;
3807 rbd_dev->parent_overlap = 0;
3808
3809 rbd_dev->image_format = 1;
3810
3811 dout("discovered version 1 image, header name is %s\n",
3812 rbd_dev->header_name);
3813
3814 return 0;
3815
3816 out_err:
3817 kfree(rbd_dev->header_name);
3818 rbd_dev->header_name = NULL;
3819 kfree(rbd_dev->spec->image_id);
3820 rbd_dev->spec->image_id = NULL;
3821
3822 return ret;
3823 }
3824
3825 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
3826 {
3827 size_t size;
3828 int ret;
3829 u64 ver = 0;
3830
3831 /*
3832 * Image id was filled in by the caller. Record the header
3833 * object name for this rbd image.
3834 */
3835 size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id);
3836 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
3837 if (!rbd_dev->header_name)
3838 return -ENOMEM;
3839 sprintf(rbd_dev->header_name, "%s%s",
3840 RBD_HEADER_PREFIX, rbd_dev->spec->image_id);
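	/* e.g., "rbd_header.<image_id>", given RBD_HEADER_PREFIX "rbd_header." */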
3841
3842 /* Get the size and object order for the image */
3843
3844 ret = rbd_dev_v2_image_size(rbd_dev);
3845 if (ret < 0)
3846 goto out_err;
3847
3848 /* Get the object prefix (a.k.a. block_name) for the image */
3849
3850 ret = rbd_dev_v2_object_prefix(rbd_dev);
3851 if (ret < 0)
3852 goto out_err;
3853
3854 	/* Get and check the features for the image */
3855
3856 ret = rbd_dev_v2_features(rbd_dev);
3857 if (ret < 0)
3858 goto out_err;
3859
3860 /* If the image supports layering, get the parent info */
3861
3862 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
3863 ret = rbd_dev_v2_parent_info(rbd_dev);
3864 if (ret < 0)
3865 goto out_err;
3866 }
3867
3868 /* crypto and compression type aren't (yet) supported for v2 images */
3869
3870 rbd_dev->header.crypt_type = 0;
3871 rbd_dev->header.comp_type = 0;
3872
3873 /* Get the snapshot context, plus the header version */
3874
3875 ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
3876 if (ret)
3877 goto out_err;
3878 rbd_dev->header.obj_version = ver;
3879
3880 rbd_dev->image_format = 2;
3881
3882 dout("discovered version 2 image, header name is %s\n",
3883 rbd_dev->header_name);
3884
3885 return 0;
3886 out_err:
3887 rbd_dev->parent_overlap = 0;
3888 rbd_spec_put(rbd_dev->parent_spec);
3889 rbd_dev->parent_spec = NULL;
3890 kfree(rbd_dev->header_name);
3891 rbd_dev->header_name = NULL;
3892 kfree(rbd_dev->header.object_prefix);
3893 rbd_dev->header.object_prefix = NULL;
3894
3895 return ret;
3896 }
3897
3898 static int rbd_dev_probe_finish(struct rbd_device *rbd_dev)
3899 {
3900 int ret;
3901
3902 /* no need to lock here, as rbd_dev is not registered yet */
3903 ret = rbd_dev_snaps_update(rbd_dev);
3904 if (ret)
3905 return ret;
3906
3907 ret = rbd_dev_probe_update_spec(rbd_dev);
3908 if (ret)
3909 goto err_out_snaps;
3910
3911 ret = rbd_dev_set_mapping(rbd_dev);
3912 if (ret)
3913 goto err_out_snaps;
3914
3915 /* generate unique id: find highest unique id, add one */
3916 rbd_dev_id_get(rbd_dev);
3917
3918 /* Fill in the device name, now that we have its id. */
3919 BUILD_BUG_ON(DEV_NAME_LEN
3920 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
3921 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
3922
3923 /* Get our block major device number. */
3924
3925 ret = register_blkdev(0, rbd_dev->name);
3926 if (ret < 0)
3927 goto err_out_id;
3928 rbd_dev->major = ret;
3929
3930 /* Set up the blkdev mapping. */
3931
3932 ret = rbd_init_disk(rbd_dev);
3933 if (ret)
3934 goto err_out_blkdev;
3935
3936 ret = rbd_bus_add_dev(rbd_dev);
3937 if (ret)
3938 goto err_out_disk;
3939
3940 /*
3941 * At this point cleanup in the event of an error is the job
3942 * of the sysfs code (initiated by rbd_bus_del_dev()).
3943 */
3944 down_write(&rbd_dev->header_rwsem);
3945 ret = rbd_dev_snaps_register(rbd_dev);
3946 up_write(&rbd_dev->header_rwsem);
3947 if (ret)
3948 goto err_out_bus;
3949
	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */

	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;
err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */

	rbd_bus_del_dev(rbd_dev);

	return ret;
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_snaps:
	rbd_remove_all_snaps(rbd_dev);

	return ret;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret) {
		dout("probe failed, returning %d\n", ret);

		return ret;
	}

	ret = rbd_dev_probe_finish(rbd_dev);
	if (ret)
		rbd_header_free(&rbd_dev->header);

	return ret;
}

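/*
 * Handle a write to /sys/bus/rbd/add:  parse the monitor addresses,
 * options, and image specification from the supplied buffer, get
 * (or create) a client for the cluster, create the rbd_device, and
 * probe for the image.  Returns the byte count consumed on success,
 * or a negative errno.
 */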
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64) rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (WARN_ON(spec->pool_id > (u64) U32_MAX)) {
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	return count;
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t) rc;
}

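/*
 * Look up an rbd device by its id.  Walks rbd_dev_list under
 * rbd_dev_list_lock; returns NULL if no device with that id exists.
 */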
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}

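/*
 * Device release callback, invoked once the last sysfs reference
 * is dropped.  Tears down the header watch, the disk and block
 * device registration, the in-memory header, and the device id,
 * then drops the module reference taken in rbd_add().
 */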
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_event)
		rbd_dev_header_watch_sync(rbd_dev, 0);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	rbd_dev_id_put(rbd_dev);
	rbd_assert(rbd_dev->rbd_client != NULL);
	rbd_dev_destroy(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}

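/*
 * Handle a write to /sys/bus/rbd/remove:  parse the target device
 * id, refuse with -EBUSY if the device is still open, and otherwise
 * mark it as being removed and delete it from the bus.
 */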
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id, rc;
	unsigned long ul;
	int ret = count;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

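	/*
	 * Don't remove a device that is still open.  Setting the
	 * REMOVING flag under the lock lets the open path refuse
	 * a device that is going away.
	 */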
	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

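/* undo rbd_sysfs_init() */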
static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

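/*
 * Module entry point:  verify that the libceph core is compatible
 * with what this driver expects, then create the sysfs control
 * files through which devices are added and removed.
 */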
int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");

		return -EINVAL;
	}
	rc = rbd_sysfs_init();
	if (rc)
		return rc;
	pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return 0;
}

void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");