xen/blkback: Utilize the M2P override mechanism for GNTMAP_host_map
drivers/xen/blkback/blkback.c
/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
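/*
 * Illustrative example (module name assumed from the sysfs path used below):
 * because the permission argument above is 0, this knob is load-time only,
 * e.g. "modprobe blkback reqs=128"; it cannot be changed through sysfs.
 */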

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
static int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
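/*
 * Illustrative example: both of the above can be flipped at run time from
 * the driver domain, e.g. "echo 1 > /sys/module/blkback/parameters/log_stats"
 * to enable the periodic per-interface statistics printed by print_stats().
 */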

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t *blkif;
	u64 id;
	int nr_pages;
	atomic_t pendcnt;
	unsigned short operation;
	int status;
	struct list_head free_list;
} pending_req_t;

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	pending_req_t *pending_reqs;
	struct list_head pending_free;
	spinlock_t pending_free_lock;
	wait_queue_head_t pending_free_wq;
	struct page **pending_pages;
	grant_handle_t *pending_grant_handles;
};

static struct xen_blkbk *blkbk;

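/*
 * Helpers for the global page pool: request k in blkbk->pending_reqs owns the
 * BLKIF_MAX_SEGMENTS_PER_REQUEST consecutive slots starting at
 * k * BLKIF_MAX_SEGMENTS_PER_REQUEST in pending_pages/pending_grant_handles;
 * vaddr_pagenr() computes that flat index from a (request, segment) pair.
 */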
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, pending_req_t,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}

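/*
 * Unmap the grant mappings of a finished request and drop the M2P overrides
 * that were installed when it was dispatched, so the pre-allocated pages can
 * be reused for the next request.
 */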
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/* Note, we use invcount, not req->nr_pages, so we can't index
	 * using vaddr(req, i). */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}

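/*
 * Per-interface kernel thread: sleeps until the frontend posts requests and a
 * free pending_req is available, drains the ring via do_block_io_op(), and
 * optionally prints the statistics above every ten seconds.
 */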
int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) || kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

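/*
 * Each completed bio decrements pendcnt; only when the last bio of a request
 * finishes do we unmap its grants, queue the response and recycle the
 * pending_req.
 */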
static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}


/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}


/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

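/*
 * Pull requests off the shared ring, coping with the native, 32-bit and
 * 64-bit protocol layouts, take a private copy of each request and hand it
 * to dispatch_rw_block_io(). Returns nonzero if there is more work to do.
 */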
static int do_block_io_op(blkif_t *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

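/*
 * Dispatch one read/write/barrier request: map the frontend's grant
 * references into the pre-allocated pages, install M2P overrides for those
 * mappings, translate the request through the VBD, and build and submit the
 * bio(s) for the underlying block device.
 */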
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = REQ_FLUSH | REQ_FUA;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != (REQ_FLUSH | REQ_FUA)) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects = 0;

	pending_req->blkif = blkif;
	pending_req->id = req->id;
	pending_req->operation = req->operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_pages = nseg;

	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;

		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

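	/*
	 * Record the foreign machine frame of each successfully mapped grant
	 * in the M2P override table, so later pfn/mfn translations for these
	 * pages resolve to the frontend's frame.
	 */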
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
				       blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n",
			       (unsigned long)map[i].dev_bus_addr, ret);
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_sector = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	if (!bio) {
		BUG_ON(operation != (REQ_FLUSH | REQ_FUA));
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
		bio->bi_sector = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == (REQ_FLUSH | REQ_FUA))
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}


/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

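/*
 * Put a response on the ring in whichever layout (native or compat) the
 * frontend uses, push it, and kick the event channel if the ring macros say
 * a notification is required.
 */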
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id = id;
	resp.operation = op;
	resp.status = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

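/*
 * Module initialisation: allocate the global pool of blkif_reqs pending
 * requests, each with BLKIF_MAX_SEGMENTS_PER_REQUEST pre-allocated pages and
 * grant-handle slots, then register the interface and xenbus machinery.
 */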
static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = vmalloc(sizeof(struct xen_blkbk));
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) *
				      blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = vzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages);
	blkbk->pending_pages = vzalloc(sizeof(blkbk->pending_pages[0]) * mmap_pages);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	vfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
	}
	vfree(blkbk->pending_pages);
	vfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");