xen/blkback: Add support for BLKIF_OP_FLUSH_DISKCACHE and drop BLKIF_OP_WRITE_BARRIER.
drivers/block/xen-blkback/xenbus.c
/* Xenbus code for blkif backend
   Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
   Copyright (C) 2005 XenSource Ltd

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

*/

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

#undef DPRINTK
#define DPRINTK(fmt, args...)				\
	pr_debug("blkback/xenbus (%s:%d) " fmt ".\n",	\
		 __func__, __LINE__, ##args)

struct backend_info {
	struct xenbus_device	*dev;
	struct blkif_st		*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

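/*
 * Build the name used for the per-device kernel thread:
 * "blkback.<domid>.<devname>".  The buffer is expected to be
 * TASK_COMM_LEN bytes, so longer names are silently truncated by
 * snprintf().
 */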
static int blkback_name(struct blkif_st *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

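/*
 * Called when either end of the connection becomes ready: once both the
 * ring/irq (frontend side) and the backing bdev (backend side) are in
 * place, switch the xenbus state to Connected and start the per-device
 * "xenblkd" thread (xen_blkif_schedule) that services the ring.
 */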
static void xen_update_blkif_status(struct blkif_st *blkif)
{
	int err;
	char name[TASK_COMM_LEN];

	/* Not ready to connect? */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
	if (IS_ERR(blkif->xenblkd)) {
		err = PTR_ERR(blkif->xenblkd);
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
	}
}

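/*
 * Allocate and zero a blkif_st from the slab cache.  The shared ring is
 * mapped and the backing device opened later, by connect_ring() and
 * backend_changed() respectively.
 */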
static struct blkif_st *xen_blkif_alloc(domid_t domid)
{
	struct blkif_st *blkif;

	blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	memset(blkif, 0, sizeof(*blkif));
	blkif->domid = domid;
	spin_lock_init(&blkif->blk_ring_lock);
	atomic_set(&blkif->refcnt, 1);
	init_waitqueue_head(&blkif->wq);
	blkif->st_print = jiffies;
	init_waitqueue_head(&blkif->waiting_to_free);

	return blkif;
}

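/*
 * Map (and later unmap) the grant reference for the frontend's shared
 * ring page into the VM area allocated in xen_blkif_map().
 */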
static int map_frontend_page(struct blkif_st *blkif, unsigned long shared_page)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			  GNTMAP_host_map, shared_page, blkif->domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK(" Grant table operation failure !\n");
		return op.status;
	}

	blkif->shmem_ref = shared_page;
	blkif->shmem_handle = op.handle;

	return 0;
}

static void unmap_frontend_page(struct blkif_st *blkif)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			    GNTMAP_host_map, blkif->shmem_handle);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();
}

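/*
 * Map the frontend's shared ring and bind the interdomain event channel.
 * The ring is initialised according to whichever ABI the frontend
 * negotiated (native, x86_32 or x86_64).
 */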
static int xen_blkif_map(struct blkif_st *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}

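/*
 * Tear the connection down: stop the xenblkd thread, wait for outstanding
 * requests to drain (refcnt reaching zero), then unbind the irq and unmap
 * the shared ring.  Calling this on an already torn-down blkif is harmless.
 */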
static void xen_blkif_disconnect(struct blkif_st *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
	}
}

void xen_blkif_free(struct blkif_st *blkif)
{
	if (!atomic_dec_and_test(&blkif->refcnt))
		BUG();
	kmem_cache_free(xen_blkif_cachep, blkif);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct blkif_st),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

/*
 *  sysfs interface for VBD I/O requests
 */

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

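/*
 * Each VBD_SHOW() invocation below expands to a show_<name>() read handler
 * plus a matching dev_attr_<name>; e.g. VBD_SHOW(oo_req, ...) defines
 * show_oo_req() and dev_attr_oo_req, which is exported through the
 * "statistics" attribute group created in xenvbd_sysfs_addif().
 */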
VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,   "%d\n", be->blkif->st_f_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);

static struct attribute *vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static struct attribute_group vbdstat_group = {
	.name = "statistics",
	.attrs = vbdstat_attrs,
};

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

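/*
 * vbd_create() opens the physical device the toolstack pointed us at and
 * records its properties (size, CD-ROM/removable flags, and whether the
 * underlying queue supports flushes); vbd_free() drops the bdev reference.
 */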
static void vbd_free(struct vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

static int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle,
		      unsigned major, unsigned minor, int readonly,
		      int cdrom)
{
	struct vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		DPRINTK("vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		vbd_free(vbd);
		return -ENOENT;
	}

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}
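
/*
 * xenbus removal callback: drop the sysfs nodes and the xenstore watch,
 * then disconnect and free the block interface itself.
 */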
static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	DPRINTK("");

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);
		vbd_free(&be->blkif->vbd);
		xen_blkif_free(be->blkif);
		be->blkif = NULL;
	}

	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}

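/*
 * Advertise (or withdraw) "feature-flush-cache" in xenstore so the frontend
 * knows whether BLKIF_OP_FLUSH_DISKCACHE will be honoured; per the commit
 * subject, this replaces the old BLKIF_OP_WRITE_BARRIER advertisement.
 */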
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-flush-cache");

	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	DPRINTK("failed");
	xen_blkbk_remove(dev);
	return err;
}


/**
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	char *device_type;

	DPRINTK("");

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/* Since this watch will fire once immediately after it is
		   registered, we expect this.  Ignore it, and wait for the
		   hotplug scripts. */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if ((be->major || be->minor) &&
	    ((be->major != major) || (be->minor != minor))) {
		printk(KERN_WARNING
		       "blkback: changing physical device (from %x:%x to "
		       "%x:%x) not supported.\n", be->major, be->minor,
		       major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	if (be->major == 0 && be->minor == 0) {
		/* Front end dir is a number, which is used as the handle. */

		char *p = strrchr(dev->otherend, '/') + 1;
		long handle;
		err = strict_strtoul(p, 0, &handle);
		if (err)
			return;

		be->major = major;
		be->minor = minor;

		err = vbd_create(be->blkif, handle, major, minor,
				 (NULL == strchr(be->mode, 'w')), cdrom);
		if (err) {
			be->major = be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating vbd structure");
			return;
		}

		err = xenvbd_sysfs_addif(dev);
		if (err) {
			vbd_free(&be->blkif->vbd);
			be->major = be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
			return;
		}

		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}


/**
 * Callback received when the frontend's state changes.
 */
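/*
 * The state machine below treats Initialised and Connected identically:
 * any stale connection is torn down first, then the ring is (re)connected
 * and the status updated.  Closing/Closed tear the connection down, and
 * Unknown unregisters the device.
 */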
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	DPRINTK("%s", xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			printk(KERN_INFO "%s: %s: prepare for reconnect\n",
			       __func__, dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/* Ensure we connect even when two watches fire in
		   close succession and we miss the intermediate value
		   of frontend_state. */
		if (dev->state == XenbusStateConnected)
			break;

		/* Enforce precondition before potential leak point.
		 * blkif_disconnect() is idempotent.
		 */
		xen_blkif_disconnect(be->blkif);

		err = connect_ring(be);
		if (err)
			break;
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies blkif_disconnect() via blkback_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}


/* ** Connection ** */


/**
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
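/*
 * Everything is written in one xenstore transaction (retried on -EAGAIN).
 * The resulting backend nodes look like this (illustrative values):
 *
 *   feature-flush-cache = "1"
 *   sectors             = "20971520"
 *   info                = "0"
 *   sector-size         = "512"
 */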
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	DPRINTK("%s", dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
	if (err)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

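/*
 * Read the frontend's ring-ref, event-channel and (optional) protocol
 * nodes, then hand them to xen_blkif_map() to map the shared ring and
 * bind the event channel.
 */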
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	char protocol[64] = "";
	int err;

	DPRINTK("%s", dev->otherend);

	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
			    "%63s", protocol, NULL);
	if (err)
		strcpy(protocol, "unspecified, assuming native");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -1;
	}
	printk(KERN_INFO
	       "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
	       ring_ref, evtchn, be->blkif->blk_protocol, protocol);

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return err;
	}

	return 0;
}


/* ** Driver Registration ** */


static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};


static struct xenbus_driver xen_blkbk = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
};


int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk);
}