/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <linux/seqlock.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy. When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept. However, if a target was idle, we can
 * choose a queue arbitrarily. In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs. This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * tgt_seq is held to serialize reading and writing req_vq.
 *
 * Decrements of reqs are never concurrent with writes of req_vq: before the
 * decrement reqs will be != 0; after the decrement the virtqueue completion
 * routine will not use the req_vq so it can be changed by a new request.
 * Thus they can happen outside the tgt_seq, provided of course we make reqs
 * an atomic_t.
 */
struct virtio_scsi_target_state {
	seqcount_t tgt_seq;

	/* Count of outstanding requests. */
	atomic_t reqs;

	/* Currently active virtqueue for requests sent to this target. */
	struct virtio_scsi_vq *req_vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	/* If the affinity hint is set for virtqueues */
	bool affinity_hint_set;

	/* CPU hotplug notifier */
	struct notifier_block nb;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (!resid)
		return;

	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);
		return;
	}

	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	sc->scsi_done(sc);

	atomic_dec(&tgt->reqs);
}

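/*
 * Drain a virtqueue: pop every completed buffer and hand it to @fn while
 * holding the per-virtqueue lock, re-checking after callbacks are re-enabled
 * so completions that race with the drain are not missed.
 */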
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

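/*
 * Poll every request virtqueue for completions; used after a task management
 * function to reap responses whose interrupts may not have fired yet.
 */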
static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete_all(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);

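/*
 * Post one event buffer to the event virtqueue so the device can report a
 * hotplug or parameter-change event into it later.
 */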
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync. */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

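/*
 * Transport reset events carry the affected address in the virtio-scsi LUN
 * field: byte 1 is the target and bytes 2-3 hold the LUN. A RESCAN reason
 * adds the device, a REMOVED reason tears it down.
 */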
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
				shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
}

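/*
 * Parameter-change events encode the sense ASC in the low byte of
 * event->reason and the ASCQ in the high byte; a capacity or mode
 * parameter change triggers a rescan of the affected device.
 */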
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = event->reason & 255;
	u8 ascq = event->reason >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
			shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	 * "Capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (event->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
	}

	/* Request header. */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer. */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header. */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

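/*
 * Queue a command on a virtqueue under its lock and notify the device only
 * if virtqueue_kick_prepare() says a notification is actually needed.
 */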
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

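/*
 * Fill the virtio-scsi request header: the 4-byte LUN field uses the
 * single-level format (0x01, target id, 0x40 | LUN high bits, LUN low byte)
 * and the command is tagged with the scsi_cmnd pointer for completion lookup.
 */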
static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = (unsigned long)sc;
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

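/*
 * As above, but also record how many T10 protection-information bytes travel
 * with the data, derived from the request's sector count and the integrity
 * tuple size of the backing disk.
 */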
static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = sc->request;
	struct blk_integrity *bi;

	virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->rq_disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
}

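/*
 * Build the request header (with or without T10 PI fields, depending on
 * the VIRTIO_SCSI_F_T10_PI feature), copy in the CDB and hand the command
 * to the chosen request virtqueue; if it cannot be queued (typically a
 * full ring), report SCSI_MLQUEUE_HOST_BUSY back to the midlayer.
 */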
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
				 struct virtio_scsi_vq *req_vq,
				 struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	int req_size;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported? */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else {
		virtio_scsi_init_hdr(&cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
		return SCSI_MLQUEUE_HOST_BUSY;
	return 0;
}

static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	atomic_inc(&tgt->reqs);
	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}

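/* With blk-mq, the hardware queue chosen by the block layer maps 1:1 onto a
 * request virtqueue, so just decode it from the request's unique tag.
 */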
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(sc->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

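/*
 * Non-blk-mq queue steering as described above virtio_scsi_target_state: a
 * busy target keeps using its current virtqueue (read under tgt_seq), while
 * an idle target is switched to the queue matching the current CPU.
 */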
static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
					       struct virtio_scsi_target_state *tgt)
{
	struct virtio_scsi_vq *vq;
	unsigned long flags;
	u32 queue_num;

	local_irq_save(flags);
	if (atomic_inc_return(&tgt->reqs) > 1) {
		unsigned long seq;

		do {
			seq = read_seqcount_begin(&tgt->tgt_seq);
			vq = tgt->req_vq;
		} while (read_seqcount_retry(&tgt->tgt_seq, seq));
	} else {
		/* no writes can be concurrent because of atomic_t */
		write_seqcount_begin(&tgt->tgt_seq);

		/* keep previous req_vq if a reader just arrived */
		if (unlikely(atomic_read(&tgt->reqs) > 1)) {
			vq = tgt->req_vq;
			goto unlock;
		}

		queue_num = smp_processor_id();
		while (unlikely(queue_num >= vscsi->num_queues))
			queue_num -= vscsi->num_queues;
		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
 unlock:
		write_seqcount_end(&tgt->tgt_seq);
	}
	local_irq_restore(flags);

	return vq;
}

static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
				       struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;
	struct virtio_scsi_vq *req_vq;

	if (shost_use_blk_mq(sh))
		req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	else
		req_vq = virtscsi_pick_vq(vscsi, tgt);

	return virtscsi_queuecommand(vscsi, req_vq, sc);
}

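/*
 * Send a task management function on the control virtqueue and wait for its
 * response; the caller must have allocated @cmd from virtscsi_cmd_pool, as
 * it is freed here.
 */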
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI). Poll the
	 * virtqueues once.
	 *
	 * In the abort case, sc->scsi_done will do nothing, because
	 * the block layer must have detected a timeout and as a result
	 * REQ_ATOM_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev: Virtscsi target whose queue depth to change
 * @qdepth: New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc = sc;
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
	struct virtio_scsi *vscsi = shost_priv(sh);

	struct virtio_scsi_target_state *tgt =
				kmalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt)
		return -ENOMEM;

	seqcount_init(&tgt->tgt_seq);
	atomic_set(&tgt->reqs, 0);
	tgt->req_vq = &vscsi->req_vqs[0];

	starget->hostdata = tgt;
	return 0;
}

static void virtscsi_target_destroy(struct scsi_target *starget)
{
	struct virtio_scsi_target_state *tgt = starget->hostdata;
	kfree(tgt);
}

static struct scsi_host_template virtscsi_host_template_single = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand_single,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
	.track_queue_depth = 1,
};

static struct scsi_host_template virtscsi_host_template_multi = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand_multi,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
	.track_queue_depth = 1,
};

#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)

static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of CPUs is equal
	 * to the number of request queues, we make each queue
	 * private to one CPU by setting the affinity hint, which
	 * eliminates contention on the queue.
	 */
	if ((vscsi->num_queues == 1 ||
	     vscsi->num_queues != num_online_cpus()) && affinity) {
		if (vscsi->affinity_hint_set)
			affinity = false;
		else
			return;
	}

	if (affinity) {
		i = 0;
		for_each_online_cpu(cpu) {
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
			i++;
		}

		vscsi->affinity_hint_set = true;
	} else {
		for (i = 0; i < vscsi->num_queues; i++) {
			if (!vscsi->req_vqs[i].vq)
				continue;

			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
		}

		vscsi->affinity_hint_set = false;
	}
}

static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
	get_online_cpus();
	__virtscsi_set_affinity(vscsi, affinity);
	put_online_cpus();
}

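/*
 * CPU hotplug notifier: recompute the virtqueue affinity hints whenever a
 * CPU comes online or goes away, so the queue-per-CPU mapping stays valid.
 */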
static int virtscsi_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
{
	struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);
	switch(action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__virtscsi_set_affinity(vscsi, true);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_set_affinity(vscsi, false);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
}

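/*
 * Allocate and initialize all virtqueues: control and event first (the
 * VIRTIO_SCSI_VQ_BASE fixed slots), then one request queue per hardware
 * queue, and push the CDB and sense buffer sizes into the device config.
 */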
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
	names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_set_affinity(vscsi, true);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err, host_prot;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;
	struct scsi_host_template *hostt;

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	if (num_queues == 1)
		hostt = &virtscsi_host_template_single;
	else
		hostt = &virtscsi_host_template_multi;

	shost = scsi_host_alloc(hostt,
		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	vscsi->nb.notifier_call = &virtscsi_cpu_callback;
	err = register_hotcpu_notifier(&vscsi->nb);
	if (err) {
		pr_err("registering cpu notifier failed\n");
		goto scsi_add_host_failed;
	}

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);

	unregister_hotcpu_notifier(&vscsi->nb);

	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	unregister_hotcpu_notifier(&vscsi->nb);
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	err = register_hotcpu_notifier(&vscsi->nb);
	if (err) {
		vdev->config->del_vqs(vdev);
		return err;
	}

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
	VIRTIO_SCSI_F_T10_PI,
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
	return ret;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");