/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct blkif_st		*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};
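
/*
 * A request's life cycle, as implemented below: taken from the free pool by
 * alloc_req(), filled in and mapped in dispatch_rw_block_io()/xen_blkbk_map(),
 * then its bios are submitted; each completion drops pendcnt in
 * __end_block_io_op(), and the last one unmaps the grants, queues the
 * response and returns the request via free_req().
 */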

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}
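
/*
 * For example, with BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, segment 2 of the
 * fourth pending_req (index 3) maps to pending_pages[3 * 11 + 2], i.e.
 * pending_pages[35].
 */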

/*
 * These helpers expand against a struct xen_blkbk member, e.g.
 * blkbk->pending_page(req, seg) becomes
 * blkbk->pending_pages[vaddr_pagenr(req, seg)].
 */
#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(struct blkif_st *blkif);
static int dispatch_rw_block_io(struct blkif_st *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}
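
/*
 * The free pool throttles the whole backend: when it is empty,
 * xen_blkif_schedule() sleeps on pending_free_wq until a completing
 * request is returned via free_req() above.
 */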

/*
 * Routines for managing virtual block devices (vbds).
 */
static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
			 int operation)
{
	struct vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
		goto out;

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
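
/*
 * Called from xen_blkif_schedule() when the size of the backing device no
 * longer matches what we last advertised; writes the new size (in sectors)
 * and the current state to xenstore so the frontend can pick the change up.
 */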
static void vbd_resize(struct blkif_st *blkif)
{
	struct vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	printk(KERN_INFO "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printk(KERN_WARNING "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		printk(KERN_WARNING "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		printk(KERN_WARNING "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		printk(KERN_WARNING "Error ending transaction");
	/* Don't fall through and abort a transaction we just committed. */
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct blkif_st *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
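
/*
 * The handler above runs in interrupt context and only marks the blkif as
 * having work; the ring itself is drained by xen_blkif_schedule() below.
 */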

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | f %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}
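
/*
 * Main loop of the per-device kernel thread (blkif->xenblkd): sleep until
 * the frontend kicks us and a free pending_req is available, drain the ring
 * via do_block_io_op(), and re-arm waiting_reqs if it reports that more
 * work remains.
 */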
int xen_blkif_schedule(void *arg)
{
	struct blkif_st *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

struct seg_buf {
	unsigned long buf;
	unsigned int nsec;
};
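
/*
 * One seg_buf per segment: 'buf' holds the segment's bus address with the
 * byte offset of its first sector OR-ed into the low bits (see
 * xen_blkbk_map()), and 'nsec' is its length in 512-byte sectors.
 */
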
/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for %lx\n",
			       (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}
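
/*
 * Map the guest's grant references for one request and install the M2P
 * overrides, filling seg[i].buf with the bus address (plus in-page offset)
 * of each mapped segment. Returns nonzero if any grant failed to map, in
 * which case the caller cleans up with xen_blkbk_unmap().
 */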
static int xen_blkbk_map(struct blkif_request *req,
			 struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->nr_segments;
	int ret = 0;

	/*
	 * Set up map[..] with the PFN of the page in our domain and the
	 * corresponding grant reference for each page in the request.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		if (pending_req->operation != BLKIF_OP_READ)
			/* A disk write only reads the guest's pages. */
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref,
				  pending_req->blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for %lx (ret: %d)\n",
			       (unsigned long)map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}

/*
 * Completion handler for a bio; reached via end_block_io_op(), which is
 * installed as bio->bi_end_io on submission.
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, the number of them, grant references,
 * etc.) and transmute it to the block API to hand it over to the proper
 * block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		if (dispatch_rw_block_io(blkif, &req, pending_req))
			break;

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct blkif_st *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		/*
		 * The frontend likes to set this to -1, which vbd_translate
		 * is allergic to.
		 */
		req->u.rw.sector_number = 0;
		break;
	case BLKIF_OP_WRITE_BARRIER:
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/* The corresponding xen_blkif_put is done in __end_block_io_op. */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);
		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	/*
	 * Set the completion count in one go so that none of the submit_bio
	 * calls has to use atomic_inc.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < (nbio-1); i++)
		bio_put(biolist[i]);
	/* pendcnt was never set; make __end_block_io_op finish the request. */
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}


/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
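
/*
 * Module initialization: one global pool sized for xen_blkif_reqs requests
 * of up to BLKIF_MAX_SEGMENTS_PER_REQUEST pages each; with the default 64
 * requests and 11 segments that is 704 preallocated pages (and as many
 * grant handles).
 */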
static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       xen_blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	/* pending_pages may itself be NULL if its allocation failed. */
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
	}
	kfree(blkbk->pending_pages);
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");