Commit | Line | Data |
---|---|---|
4fe74b1c PB |
1 | /* |
2 | * Virtio SCSI HBA driver | |
3 | * | |
4 | * Copyright IBM Corp. 2010 | |
5 | * Copyright Red Hat, Inc. 2011 | |
6 | * | |
7 | * Authors: | |
8 | * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> | |
9 | * Paolo Bonzini <pbonzini@redhat.com> | |
10 | * | |
11 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
12 | * See the COPYING file in the top-level directory. | |
13 | * | |
14 | */ | |
15 | ||
16 | #include <linux/module.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/mempool.h> | |
19 | #include <linux/virtio.h> | |
20 | #include <linux/virtio_ids.h> | |
21 | #include <linux/virtio_config.h> | |
22 | #include <linux/virtio_scsi.h> | |
23 | #include <scsi/scsi_host.h> | |
24 | #include <scsi/scsi_device.h> | |
25 | #include <scsi/scsi_cmnd.h> | |
26 | ||
27 | #define VIRTIO_SCSI_MEMPOOL_SZ 64 | |
28 | ||
/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;		/* originating midlayer command, if any */
	struct completion *comp;	/* non-NULL for synchronous (TMF) requests */
	/* Request buffer, read by the device; one arm per request type. */
	union {
		struct virtio_scsi_cmd_req      cmd;
		struct virtio_scsi_ctrl_tmf_req tmf;
		struct virtio_scsi_ctrl_an_req  an;
	} req;
	/* Response buffer, written by the device. */
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;
45 | ||
/* Driver instance state */
struct virtio_scsi {
	/* Protects ctrl_vq, req_vq and sg[] */
	spinlock_t vq_lock;

	struct virtio_device *vdev;	/* underlying virtio device */
	struct virtqueue *ctrl_vq;	/* TMF / asynchronous-notification queue */
	struct virtqueue *event_vq;	/* device-initiated event queue */
	struct virtqueue *req_vq;	/* SCSI command queue */

	/* For sglist construction when adding commands to the virtqueue. */
	struct scatterlist sg[];	/* flexible array, sized at host alloc */
};
59 | ||
/* Slab cache and mempool backing struct virtio_scsi_cmd allocations;
 * the mempool lets allocation make progress under memory pressure. */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
62 | ||
63 | static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) | |
64 | { | |
65 | return vdev->priv; | |
66 | } | |
67 | ||
68 | static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) | |
69 | { | |
70 | if (!resid) | |
71 | return; | |
72 | ||
73 | if (!scsi_bidi_cmnd(sc)) { | |
74 | scsi_set_resid(sc, resid); | |
75 | return; | |
76 | } | |
77 | ||
78 | scsi_in(sc)->resid = min(resid, scsi_in(sc)->length); | |
79 | scsi_out(sc)->resid = resid - scsi_in(sc)->resid; | |
80 | } | |
81 | ||
/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Translates the virtio response/status into midlayer result codes,
 * copies sense data, frees the command element and completes @sc.
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	/* SCSI status byte first; set_host_byte() below ORs in the host byte. */
	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		/* Unknown responses are logged, then treated as failures. */
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		/* Copy is clamped even if the device over-reports sense_len. */
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	/* cmd must not be touched after this; scsi_done may requeue sc. */
	mempool_free(cmd, virtscsi_cmd_pool);
	sc->scsi_done(sc);
}
147 | ||
148 | static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf)) | |
149 | { | |
150 | struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); | |
151 | struct virtio_scsi *vscsi = shost_priv(sh); | |
152 | void *buf; | |
153 | unsigned long flags; | |
154 | unsigned int len; | |
155 | ||
156 | spin_lock_irqsave(&vscsi->vq_lock, flags); | |
157 | ||
158 | do { | |
159 | virtqueue_disable_cb(vq); | |
160 | while ((buf = virtqueue_get_buf(vq, &len)) != NULL) | |
161 | fn(buf); | |
162 | } while (!virtqueue_enable_cb(vq)); | |
163 | ||
164 | spin_unlock_irqrestore(&vscsi->vq_lock, flags); | |
165 | } | |
166 | ||
167 | static void virtscsi_req_done(struct virtqueue *vq) | |
168 | { | |
169 | virtscsi_vq_done(vq, virtscsi_complete_cmd); | |
170 | }; | |
171 | ||
172 | static void virtscsi_complete_free(void *buf) | |
173 | { | |
174 | struct virtio_scsi_cmd *cmd = buf; | |
175 | ||
176 | if (cmd->comp) | |
177 | complete_all(cmd->comp); | |
e4594bb5 PB |
178 | else |
179 | mempool_free(cmd, virtscsi_cmd_pool); | |
4fe74b1c PB |
180 | } |
181 | ||
182 | static void virtscsi_ctrl_done(struct virtqueue *vq) | |
183 | { | |
184 | virtscsi_vq_done(vq, virtscsi_complete_free); | |
185 | }; | |
186 | ||
187 | static void virtscsi_event_done(struct virtqueue *vq) | |
188 | { | |
189 | virtscsi_vq_done(vq, virtscsi_complete_free); | |
190 | }; | |
191 | ||
192 | static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, | |
193 | struct scsi_data_buffer *sdb) | |
194 | { | |
195 | struct sg_table *table = &sdb->table; | |
196 | struct scatterlist *sg_elem; | |
197 | unsigned int idx = *p_idx; | |
198 | int i; | |
199 | ||
200 | for_each_sg(table->sgl, sg_elem, table->nents, i) | |
201 | sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length); | |
202 | ||
203 | *p_idx = idx; | |
204 | } | |
205 | ||
/**
 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
 * @vscsi	: virtio_scsi state
 * @cmd		: command structure
 * @out_num	: number of read-only elements
 * @in_num	: number of write-only elements
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 *
 * Layout order is mandated by the virtio-scsi spec: request header,
 * data-out, response header, data-in.  All readable (out) elements must
 * precede all writable (in) elements.
 *
 * Called with vq_lock held.
 */
static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
			     struct virtio_scsi_cmd *cmd,
			     unsigned *out_num, unsigned *in_num,
			     size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;	/* NULL for TMF/AN requests */
	struct scatterlist *sg = vscsi->sg;
	unsigned int idx = 0;

	if (sc) {
		struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
		BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

		/* TODO: check feature bit and fail if unsupported? */
		BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
	}

	/* Request header. */
	sg_set_buf(&sg[idx++], &cmd->req, req_size);

	/* Data-out buffer. */
	if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
		virtscsi_map_sgl(sg, &idx, scsi_out(sc));

	*out_num = idx;

	/* Response header. */
	sg_set_buf(&sg[idx++], &cmd->resp, resp_size);

	/* Data-in buffer */
	if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
		virtscsi_map_sgl(sg, &idx, scsi_in(sc));

	*in_num = idx - *out_num;
}
252 | ||
253 | static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq, | |
254 | struct virtio_scsi_cmd *cmd, | |
255 | size_t req_size, size_t resp_size, gfp_t gfp) | |
256 | { | |
257 | unsigned int out_num, in_num; | |
258 | unsigned long flags; | |
259 | int ret; | |
260 | ||
261 | spin_lock_irqsave(&vscsi->vq_lock, flags); | |
262 | ||
263 | virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size); | |
264 | ||
265 | ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp); | |
266 | if (ret >= 0) | |
267 | virtqueue_kick(vq); | |
268 | ||
269 | spin_unlock_irqrestore(&vscsi->vq_lock, flags); | |
270 | return ret; | |
271 | } | |
272 | ||
/*
 * Queue a SCSI command from the midlayer onto the request virtqueue.
 * Returns 0 on success or SCSI_MLQUEUE_HOST_BUSY so the midlayer retries
 * later when allocation or the virtqueue is exhausted.
 */
static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_cmd *cmd;
	int ret;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	ret = SCSI_MLQUEUE_HOST_BUSY;
	/* GFP_ATOMIC: may be called from atomic context; mempool backstops. */
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
	if (!cmd)
		goto out;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	/* 8-byte virtio-scsi LUN field; single-level LUN addressing with the
	 * 0x40 flag in byte 2 — presumably per the virtio-scsi spec LUN
	 * format; confirm against the spec if extending to larger LUNs. */
	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,	/* tag == command pointer */
		.task_attr = VIRTIO_SCSI_S_SIMPLE,
		.prio = 0,
		.crn = 0,
	};

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
			      GFP_ATOMIC) >= 0)
		ret = 0;

out:
	return ret;
}
311 | ||
/*
 * Issue a task-management request on the control queue and wait for the
 * device's answer.  Frees @cmd on all paths.  Returns SUCCESS or FAILED
 * for the SCSI error-handling callbacks.
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			      GFP_NOIO) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
332 | ||
333 | static int virtscsi_device_reset(struct scsi_cmnd *sc) | |
334 | { | |
335 | struct virtio_scsi *vscsi = shost_priv(sc->device->host); | |
336 | struct virtio_scsi_cmd *cmd; | |
337 | ||
338 | sdev_printk(KERN_INFO, sc->device, "device reset\n"); | |
339 | cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); | |
340 | if (!cmd) | |
341 | return FAILED; | |
342 | ||
343 | memset(cmd, 0, sizeof(*cmd)); | |
344 | cmd->sc = sc; | |
345 | cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ | |
346 | .type = VIRTIO_SCSI_T_TMF, | |
347 | .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET, | |
348 | .lun[0] = 1, | |
349 | .lun[1] = sc->device->id, | |
350 | .lun[2] = (sc->device->lun >> 8) | 0x40, | |
351 | .lun[3] = sc->device->lun & 0xff, | |
352 | }; | |
353 | return virtscsi_tmf(vscsi, cmd); | |
354 | } | |
355 | ||
356 | static int virtscsi_abort(struct scsi_cmnd *sc) | |
357 | { | |
358 | struct virtio_scsi *vscsi = shost_priv(sc->device->host); | |
359 | struct virtio_scsi_cmd *cmd; | |
360 | ||
361 | scmd_printk(KERN_INFO, sc, "abort\n"); | |
362 | cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); | |
363 | if (!cmd) | |
364 | return FAILED; | |
365 | ||
366 | memset(cmd, 0, sizeof(*cmd)); | |
367 | cmd->sc = sc; | |
368 | cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ | |
369 | .type = VIRTIO_SCSI_T_TMF, | |
370 | .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, | |
371 | .lun[0] = 1, | |
372 | .lun[1] = sc->device->id, | |
373 | .lun[2] = (sc->device->lun >> 8) | 0x40, | |
374 | .lun[3] = sc->device->lun & 0xff, | |
375 | .tag = (unsigned long)sc, | |
376 | }; | |
377 | return virtscsi_tmf(vscsi, cmd); | |
378 | } | |
379 | ||
/* SCSI midlayer host template: queueing entry point plus the abort and
 * LUN-reset error handlers implemented above. */
static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.queuecommand = virtscsi_queuecommand,
	.this_id = -1,	/* no initiator ID on this transport */
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
};
393 | ||
/* Read field @fld from the device's virtio_scsi_config space; evaluates
 * to a value of the field's declared type. */
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		vdev->config->get(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
		__val; \
	})
402 | ||
/* Write @val into field @fld of the device's virtio_scsi_config space. */
#define virtscsi_config_set(vdev, fld, val) \
	(void)({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		vdev->config->set(vdev, \
				  offsetof(struct virtio_scsi_config, fld), \
				  &__val, sizeof(__val)); \
	})
410 | ||
411 | static int virtscsi_init(struct virtio_device *vdev, | |
412 | struct virtio_scsi *vscsi) | |
413 | { | |
414 | int err; | |
415 | struct virtqueue *vqs[3]; | |
416 | vq_callback_t *callbacks[] = { | |
417 | virtscsi_ctrl_done, | |
418 | virtscsi_event_done, | |
419 | virtscsi_req_done | |
420 | }; | |
421 | const char *names[] = { | |
422 | "control", | |
423 | "event", | |
424 | "request" | |
425 | }; | |
426 | ||
427 | /* Discover virtqueues and write information to configuration. */ | |
428 | err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names); | |
429 | if (err) | |
430 | return err; | |
431 | ||
432 | vscsi->ctrl_vq = vqs[0]; | |
433 | vscsi->event_vq = vqs[1]; | |
434 | vscsi->req_vq = vqs[2]; | |
435 | ||
436 | virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); | |
437 | virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); | |
438 | return 0; | |
439 | } | |
440 | ||
/*
 * Device probe: allocate the Scsi_Host (with room for the shared sg[]
 * array), initialize the virtqueues, size host limits from the device's
 * config space, and register/scan the host.
 */
static int __devinit virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems;
	u32 cmd_per_lun;

	/* We need to know how many segments before we allocate.
	 * We need an extra sg elements at head and tail.
	 */
	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;

	/* Allocate memory and link the structs together.  The +2 covers the
	 * request and response header elements added in virtscsi_map_cmd. */
	shost = scsi_host_alloc(&virtscsi_host_template,
		sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));

	if (!shost)
		return -ENOMEM;

	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vdev->priv = shost;	/* virtio_scsi_host() relies on this */

	/* Random initializations. */
	spin_lock_init(&vscsi->vq_lock);
	sg_init_table(vscsi->sg, sg_elems + 2);

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	/* Host limits come from device config; fall back to sane defaults. */
	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
	shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	scsi_scan_host(shost);

	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}
495 | ||
/* Quiesce the device, then delete its virtqueues; reset must come first
 * so no buffer is in flight when the queues are torn down. */
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
}
503 | ||
504 | static void __devexit virtscsi_remove(struct virtio_device *vdev) | |
505 | { | |
506 | struct Scsi_Host *shost = virtio_scsi_host(vdev); | |
507 | ||
508 | scsi_remove_host(shost); | |
509 | ||
510 | virtscsi_remove_vqs(vdev); | |
511 | scsi_host_put(shost); | |
512 | } | |
513 | ||
514 | #ifdef CONFIG_PM | |
/* PM freeze: reset the device and drop the virtqueues; restore rebuilds
 * them via virtscsi_init(). */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}
520 | ||
/* PM restore: re-discover the virtqueues and re-publish config sizes. */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi);
}
528 | #endif | |
529 | ||
/* Bind to any revision of the virtio SCSI device; zero entry terminates. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
534 | ||
/* Virtio driver glue: probe/remove plus optional PM freeze/restore. */
static struct virtio_driver virtio_scsi_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = __devexit_p(virtscsi_remove),
};
546 | ||
547 | static int __init init(void) | |
548 | { | |
549 | int ret = -ENOMEM; | |
550 | ||
551 | virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); | |
552 | if (!virtscsi_cmd_cache) { | |
553 | printk(KERN_ERR "kmem_cache_create() for " | |
554 | "virtscsi_cmd_cache failed\n"); | |
555 | goto error; | |
556 | } | |
557 | ||
558 | ||
559 | virtscsi_cmd_pool = | |
560 | mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, | |
561 | virtscsi_cmd_cache); | |
562 | if (!virtscsi_cmd_pool) { | |
563 | printk(KERN_ERR "mempool_create() for" | |
564 | "virtscsi_cmd_pool failed\n"); | |
565 | goto error; | |
566 | } | |
567 | ret = register_virtio_driver(&virtio_scsi_driver); | |
568 | if (ret < 0) | |
569 | goto error; | |
570 | ||
571 | return 0; | |
572 | ||
573 | error: | |
574 | if (virtscsi_cmd_pool) { | |
575 | mempool_destroy(virtscsi_cmd_pool); | |
576 | virtscsi_cmd_pool = NULL; | |
577 | } | |
578 | if (virtscsi_cmd_cache) { | |
579 | kmem_cache_destroy(virtscsi_cmd_cache); | |
580 | virtscsi_cmd_cache = NULL; | |
581 | } | |
582 | return ret; | |
583 | } | |
584 | ||
/* Module unload: unregister first so no command is in flight, then free
 * the mempool before the cache backing it. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

/* Module metadata: device table for autoloading, description, license. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");