KVM: s390: Set virtio-ccw transport revision
drivers/s390/kvm/virtio_ccw.c
1 /*
2 * ccw based virtio transport
3 *
4 * Copyright IBM Corp. 2012, 2014
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
11 */
12
13 #include <linux/kernel_stat.h>
14 #include <linux/init.h>
15 #include <linux/bootmem.h>
16 #include <linux/err.h>
17 #include <linux/virtio.h>
18 #include <linux/virtio_config.h>
19 #include <linux/slab.h>
20 #include <linux/interrupt.h>
21 #include <linux/virtio_ring.h>
22 #include <linux/pfn.h>
23 #include <linux/async.h>
24 #include <linux/wait.h>
25 #include <linux/list.h>
26 #include <linux/bitops.h>
27 #include <linux/module.h>
28 #include <linux/io.h>
29 #include <linux/kvm_para.h>
30 #include <linux/notifier.h>
31 #include <asm/setup.h>
32 #include <asm/irq.h>
33 #include <asm/cio.h>
34 #include <asm/ccwdev.h>
35 #include <asm/virtio-ccw.h>
36 #include <asm/isc.h>
37 #include <asm/airq.h>
38
39 /*
40 * virtio related functions
41 */
42
43 struct vq_config_block {
44 __u16 index;
45 __u16 num;
46 } __packed;
47
48 #define VIRTIO_CCW_CONFIG_SIZE 0x100
49 /* same as PCI config space size, should be enough for all drivers */
50
51 struct virtio_ccw_device {
52 struct virtio_device vdev;
53 __u8 *status;
54 __u8 config[VIRTIO_CCW_CONFIG_SIZE];
55 struct ccw_device *cdev;
56 __u32 curr_io;
57 int err;
58 unsigned int revision; /* Transport revision */
59 wait_queue_head_t wait_q;
60 spinlock_t lock;
61 struct list_head virtqueues;
62 unsigned long indicators;
63 unsigned long indicators2;
64 struct vq_config_block *config_block;
65 bool is_thinint;
66 bool going_away;
67 bool device_lost;
68 void *airq_info;
69 };
70
71 struct vq_info_block {
72 __u64 queue;
73 __u32 align;
74 __u16 index;
75 __u16 num;
76 } __packed;
77
78 struct virtio_feature_desc {
79 __u32 features;
80 __u8 index;
81 } __packed;
82
83 struct virtio_thinint_area {
84 unsigned long summary_indicator;
85 unsigned long indicator;
86 u64 bit_nr;
87 u8 isc;
88 } __packed;
89
90 struct virtio_rev_info {
91 __u16 revision;
92 __u16 length;
93 __u8 data[];
94 };
95
96 /* the highest virtio-ccw revision we support */
97 #define VIRTIO_CCW_REV_MAX 0
98
99 struct virtio_ccw_vq_info {
100 struct virtqueue *vq;
101 int num;
102 void *queue;
103 struct vq_info_block *info_block;
104 int bit_nr;
105 struct list_head node;
106 long cookie;
107 };
108
109 #define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */
110
111 #define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
112 #define MAX_AIRQ_AREAS 20
113
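/*
 * Adapter (thin) interrupts are tried first; if the host rejects
 * CCW_CMD_SET_IND_ADAPTER with a command reject, this flag is cleared at
 * runtime and subsequent setups fall back to classic per-device indicators
 * (see virtio_ccw_register_adapter_ind()).
 */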
114 static int virtio_ccw_use_airq = 1;
115
116 struct airq_info {
117 rwlock_t lock;
118 u8 summary_indicator;
119 struct airq_struct airq;
120 struct airq_iv *aiv;
121 };
122 static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
123
124 #define CCW_CMD_SET_VQ 0x13
125 #define CCW_CMD_VDEV_RESET 0x33
126 #define CCW_CMD_SET_IND 0x43
127 #define CCW_CMD_SET_CONF_IND 0x53
128 #define CCW_CMD_READ_FEAT 0x12
129 #define CCW_CMD_WRITE_FEAT 0x11
130 #define CCW_CMD_READ_CONF 0x22
131 #define CCW_CMD_WRITE_CONF 0x21
132 #define CCW_CMD_WRITE_STATUS 0x31
133 #define CCW_CMD_READ_VQ_CONF 0x32
134 #define CCW_CMD_SET_IND_ADAPTER 0x73
135 #define CCW_CMD_SET_VIRTIO_REV 0x83
136
137 #define VIRTIO_CCW_DOING_SET_VQ 0x00010000
138 #define VIRTIO_CCW_DOING_RESET 0x00040000
139 #define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
140 #define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
141 #define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
142 #define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
143 #define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
144 #define VIRTIO_CCW_DOING_SET_IND 0x01000000
145 #define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
146 #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
147 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
148 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
149 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000
150
151 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
152 {
153 return container_of(vdev, struct virtio_ccw_device, vdev);
154 }
155
156 static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
157 {
158 unsigned long i, flags;
159
160 write_lock_irqsave(&info->lock, flags);
161 for (i = 0; i < airq_iv_end(info->aiv); i++) {
162 if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
163 airq_iv_free_bit(info->aiv, i);
164 airq_iv_set_ptr(info->aiv, i, 0);
165 break;
166 }
167 }
168 write_unlock_irqrestore(&info->lock, flags);
169 }
170
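/*
 * Adapter interrupt handler: scan the indicator bits twice, once while the
 * summary indicator is still set and once after clearing it, so that
 * notifications raised between the first scan and the clearing of the
 * summary indicator are not lost.
 */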
171 static void virtio_airq_handler(struct airq_struct *airq)
172 {
173 struct airq_info *info = container_of(airq, struct airq_info, airq);
174 unsigned long ai;
175
176 inc_irq_stat(IRQIO_VAI);
177 read_lock(&info->lock);
178 /* Walk through indicators field, summary indicator active. */
179 for (ai = 0;;) {
180 ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
181 if (ai == -1UL)
182 break;
183 vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
184 }
185 info->summary_indicator = 0;
186 smp_wmb();
187 /* Walk through indicators field, summary indicator not active. */
188 for (ai = 0;;) {
189 ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
190 if (ai == -1UL)
191 break;
192 vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
193 }
194 read_unlock(&info->lock);
195 }
196
197 static struct airq_info *new_airq_info(void)
198 {
199 struct airq_info *info;
200 int rc;
201
202 info = kzalloc(sizeof(*info), GFP_KERNEL);
203 if (!info)
204 return NULL;
205 rwlock_init(&info->lock);
206 info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
207 if (!info->aiv) {
208 kfree(info);
209 return NULL;
210 }
211 info->airq.handler = virtio_airq_handler;
212 info->airq.lsi_ptr = &info->summary_indicator;
213 info->airq.lsi_mask = 0xff;
214 info->airq.isc = VIRTIO_AIRQ_ISC;
215 rc = register_adapter_interrupt(&info->airq);
216 if (rc) {
217 airq_iv_release(info->aiv);
218 kfree(info);
219 return NULL;
220 }
221 return info;
222 }
223
224 static void destroy_airq_info(struct airq_info *info)
225 {
226 if (!info)
227 return;
228
229 unregister_adapter_interrupt(&info->airq);
230 airq_iv_release(info->aiv);
231 kfree(info);
232 }
233
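/*
 * Allocate nvqs consecutive bits in one of the adapter interrupt vectors,
 * remember the owning airq_info and the first bit number, and return the
 * address of the indicator area (0 if no area has enough room).
 */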
234 static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
235 u64 *first, void **airq_info)
236 {
237 int i, j;
238 struct airq_info *info;
239 unsigned long indicator_addr = 0;
240 unsigned long bit, flags;
241
242 for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
243 if (!airq_areas[i])
244 airq_areas[i] = new_airq_info();
245 info = airq_areas[i];
246 if (!info)
247 return 0;
248 write_lock_irqsave(&info->lock, flags);
249 bit = airq_iv_alloc(info->aiv, nvqs);
250 if (bit == -1UL) {
251 /* Not enough vacancies. */
252 write_unlock_irqrestore(&info->lock, flags);
253 continue;
254 }
255 *first = bit;
256 *airq_info = info;
257 indicator_addr = (unsigned long)info->aiv->vector;
258 for (j = 0; j < nvqs; j++) {
259 airq_iv_set_ptr(info->aiv, bit + j,
260 (unsigned long)vqs[j]);
261 }
262 write_unlock_irqrestore(&info->lock, flags);
263 }
264 return indicator_addr;
265 }
266
267 static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
268 {
269 struct virtio_ccw_vq_info *info;
270
271 list_for_each_entry(info, &vcdev->virtqueues, node)
272 drop_airq_indicator(info->vq, vcdev->airq_info);
273 }
274
275 static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
276 {
277 unsigned long flags;
278 __u32 ret;
279
280 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
281 if (vcdev->err)
282 ret = 0;
283 else
284 ret = vcdev->curr_io & flag;
285 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
286 return ret;
287 }
288
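/*
 * Start the channel program and wait synchronously for its completion: the
 * start is retried while the device is busy, and the interrupt handler
 * clears the corresponding flag in curr_io and wakes us up once the I/O
 * has finished (or failed).
 */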
289 static int ccw_io_helper(struct virtio_ccw_device *vcdev,
290 struct ccw1 *ccw, __u32 intparm)
291 {
292 int ret;
293 unsigned long flags;
294 int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
295
296 do {
297 spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
298 ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
299 if (!ret) {
300 if (!vcdev->curr_io)
301 vcdev->err = 0;
302 vcdev->curr_io |= flag;
303 }
304 spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
305 cpu_relax();
306 } while (ret == -EBUSY);
307 wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
308 return ret ? ret : vcdev->err;
309 }
310
311 static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
312 struct ccw1 *ccw)
313 {
314 int ret;
315 unsigned long *indicatorp = NULL;
316 struct virtio_thinint_area *thinint_area = NULL;
317 struct airq_info *airq_info = vcdev->airq_info;
318
319 if (vcdev->is_thinint) {
320 thinint_area = kzalloc(sizeof(*thinint_area),
321 GFP_DMA | GFP_KERNEL);
322 if (!thinint_area)
323 return;
324 thinint_area->summary_indicator =
325 (unsigned long) &airq_info->summary_indicator;
326 thinint_area->isc = VIRTIO_AIRQ_ISC;
327 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
328 ccw->count = sizeof(*thinint_area);
329 ccw->cda = (__u32)(unsigned long) thinint_area;
330 } else {
331 indicatorp = kmalloc(sizeof(vcdev->indicators),
332 GFP_DMA | GFP_KERNEL);
333 if (!indicatorp)
334 return;
335 *indicatorp = 0;
336 ccw->cmd_code = CCW_CMD_SET_IND;
337 ccw->count = sizeof(vcdev->indicators);
338 ccw->cda = (__u32)(unsigned long) indicatorp;
339 }
340 /* Deregister indicators from host. */
341 vcdev->indicators = 0;
342 ccw->flags = 0;
343 ret = ccw_io_helper(vcdev, ccw,
344 vcdev->is_thinint ?
345 VIRTIO_CCW_DOING_SET_IND_ADAPTER :
346 VIRTIO_CCW_DOING_SET_IND);
347 if (ret && (ret != -ENODEV))
348 dev_info(&vcdev->cdev->dev,
349 "Failed to deregister indicators (%d)\n", ret);
350 else if (vcdev->is_thinint)
351 virtio_ccw_drop_indicators(vcdev);
352 kfree(indicatorp);
353 kfree(thinint_area);
354 }
355
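/*
 * Kick the host via diagnose 0x500: function code KVM_S390_VIRTIO_CCW_NOTIFY
 * in gr1, the subchannel id in gr2, the queue index in gr3 and the cookie in
 * gr4; the host returns the new cookie (or a negative error) in gr2.
 */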
356 static inline long do_kvm_notify(struct subchannel_id schid,
357 unsigned long queue_index,
358 long cookie)
359 {
360 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
361 register struct subchannel_id __schid asm("2") = schid;
362 register unsigned long __index asm("3") = queue_index;
363 register long __rc asm("2");
364 register long __cookie asm("4") = cookie;
365
366 asm volatile ("diag 2,4,0x500\n"
367 : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
368 "d"(__cookie)
369 : "memory", "cc");
370 return __rc;
371 }
372
373 static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
374 {
375 struct virtio_ccw_vq_info *info = vq->priv;
376 struct virtio_ccw_device *vcdev;
377 struct subchannel_id schid;
378
379 vcdev = to_vc_device(info->vq->vdev);
380 ccw_device_get_schid(vcdev->cdev, &schid);
381 info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
382 if (info->cookie < 0)
383 return false;
384 return true;
385 }
386
387 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
388 struct ccw1 *ccw, int index)
389 {
390 vcdev->config_block->index = index;
391 ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
392 ccw->flags = 0;
393 ccw->count = sizeof(struct vq_config_block);
394 ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
395 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
396 return vcdev->config_block->num;
397 }
398
399 static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
400 {
401 struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
402 struct virtio_ccw_vq_info *info = vq->priv;
403 unsigned long flags;
404 unsigned long size;
405 int ret;
406 unsigned int index = vq->index;
407
408 /* Remove from our list. */
409 spin_lock_irqsave(&vcdev->lock, flags);
410 list_del(&info->node);
411 spin_unlock_irqrestore(&vcdev->lock, flags);
412
413 /* Release from host. */
414 info->info_block->queue = 0;
415 info->info_block->align = 0;
416 info->info_block->index = index;
417 info->info_block->num = 0;
418 ccw->cmd_code = CCW_CMD_SET_VQ;
419 ccw->flags = 0;
420 ccw->count = sizeof(*info->info_block);
421 ccw->cda = (__u32)(unsigned long)(info->info_block);
422 ret = ccw_io_helper(vcdev, ccw,
423 VIRTIO_CCW_DOING_SET_VQ | index);
424 /*
425 * -ENODEV isn't considered an error: The device is gone anyway.
426 * This may happen on device detach.
427 */
428 if (ret && (ret != -ENODEV))
429 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
430 ret, index);
431
432 vring_del_virtqueue(vq);
433 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
434 free_pages_exact(info->queue, size);
435 kfree(info->info_block);
436 kfree(info);
437 }
438
439 static void virtio_ccw_del_vqs(struct virtio_device *vdev)
440 {
441 struct virtqueue *vq, *n;
442 struct ccw1 *ccw;
443 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
444
445 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
446 if (!ccw)
447 return;
448
449 virtio_ccw_drop_indicator(vcdev, ccw);
450
451 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
452 virtio_ccw_del_vq(vq, ccw);
453
454 kfree(ccw);
455 }
456
457 static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
458 int i, vq_callback_t *callback,
459 const char *name,
460 struct ccw1 *ccw)
461 {
462 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
463 int err;
464 struct virtqueue *vq = NULL;
465 struct virtio_ccw_vq_info *info;
466 unsigned long size = 0; /* silence the compiler */
467 unsigned long flags;
468
469 /* Allocate queue. */
470 info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
471 if (!info) {
472 dev_warn(&vcdev->cdev->dev, "no info\n");
473 err = -ENOMEM;
474 goto out_err;
475 }
476 info->info_block = kzalloc(sizeof(*info->info_block),
477 GFP_DMA | GFP_KERNEL);
478 if (!info->info_block) {
479 dev_warn(&vcdev->cdev->dev, "no info block\n");
480 err = -ENOMEM;
481 goto out_err;
482 }
483 info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
484 size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
485 info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
486 if (info->queue == NULL) {
487 dev_warn(&vcdev->cdev->dev, "no queue\n");
488 err = -ENOMEM;
489 goto out_err;
490 }
491
492 vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
493 true, info->queue, virtio_ccw_kvm_notify,
494 callback, name);
495 if (!vq) {
496 /* For now, we fail if we can't get the requested size. */
497 dev_warn(&vcdev->cdev->dev, "no vq\n");
498 err = -ENOMEM;
499 goto out_err;
500 }
501
502 /* Register it with the host. */
503 info->info_block->queue = (__u64)info->queue;
504 info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
505 info->info_block->index = i;
506 info->info_block->num = info->num;
507 ccw->cmd_code = CCW_CMD_SET_VQ;
508 ccw->flags = 0;
509 ccw->count = sizeof(*info->info_block);
510 ccw->cda = (__u32)(unsigned long)(info->info_block);
511 err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
512 if (err) {
513 dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
514 goto out_err;
515 }
516
517 info->vq = vq;
518 vq->priv = info;
519
520 /* Save it to our list. */
521 spin_lock_irqsave(&vcdev->lock, flags);
522 list_add(&info->node, &vcdev->virtqueues);
523 spin_unlock_irqrestore(&vcdev->lock, flags);
524
525 return vq;
526
527 out_err:
528 if (vq)
529 vring_del_virtqueue(vq);
530 if (info) {
531 if (info->queue)
532 free_pages_exact(info->queue, size);
533 kfree(info->info_block);
534 }
535 kfree(info);
536 return ERR_PTR(err);
537 }
538
539 static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
540 struct virtqueue *vqs[], int nvqs,
541 struct ccw1 *ccw)
542 {
543 int ret;
544 struct virtio_thinint_area *thinint_area = NULL;
545 struct airq_info *info;
546
547 thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
548 if (!thinint_area) {
549 ret = -ENOMEM;
550 goto out;
551 }
552 /* Try to get an indicator. */
553 thinint_area->indicator = get_airq_indicator(vqs, nvqs,
554 &thinint_area->bit_nr,
555 &vcdev->airq_info);
556 if (!thinint_area->indicator) {
557 ret = -ENOSPC;
558 goto out;
559 }
560 info = vcdev->airq_info;
561 thinint_area->summary_indicator =
562 (unsigned long) &info->summary_indicator;
563 thinint_area->isc = VIRTIO_AIRQ_ISC;
564 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
565 ccw->flags = CCW_FLAG_SLI;
566 ccw->count = sizeof(*thinint_area);
567 ccw->cda = (__u32)(unsigned long)thinint_area;
568 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
569 if (ret) {
570 if (ret == -EOPNOTSUPP) {
571 /*
572 * The host does not support adapter interrupts
573 * for virtio-ccw, stop trying.
574 */
575 virtio_ccw_use_airq = 0;
576 pr_info("Adapter interrupts unsupported on host\n");
577 } else
578 dev_warn(&vcdev->cdev->dev,
579 "Failed to enable adapter interrupts: %d\n", ret);
580 virtio_ccw_drop_indicators(vcdev);
581 }
582 out:
583 kfree(thinint_area);
584 return ret;
585 }
586
587 static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
588 struct virtqueue *vqs[],
589 vq_callback_t *callbacks[],
590 const char *names[])
591 {
592 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
593 unsigned long *indicatorp = NULL;
594 int ret, i;
595 struct ccw1 *ccw;
596
597 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
598 if (!ccw)
599 return -ENOMEM;
600
601 for (i = 0; i < nvqs; ++i) {
602 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
603 ccw);
604 if (IS_ERR(vqs[i])) {
605 ret = PTR_ERR(vqs[i]);
606 vqs[i] = NULL;
607 goto out;
608 }
609 }
610 ret = -ENOMEM;
611 /* We need a data area under 2G to communicate. */
612 indicatorp = kmalloc(sizeof(vcdev->indicators), GFP_DMA | GFP_KERNEL);
613 if (!indicatorp)
614 goto out;
615 *indicatorp = (unsigned long) &vcdev->indicators;
616 if (vcdev->is_thinint) {
617 ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
618 if (ret)
619 /* no error, just fall back to legacy interrupts */
620 vcdev->is_thinint = 0;
621 }
622 if (!vcdev->is_thinint) {
623 /* Register queue indicators with host. */
624 vcdev->indicators = 0;
625 ccw->cmd_code = CCW_CMD_SET_IND;
626 ccw->flags = 0;
627 ccw->count = sizeof(vcdev->indicators);
628 ccw->cda = (__u32)(unsigned long) indicatorp;
629 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
630 if (ret)
631 goto out;
632 }
633 /* Register indicators2 with host for config changes */
634 *indicatorp = (unsigned long) &vcdev->indicators2;
635 vcdev->indicators2 = 0;
636 ccw->cmd_code = CCW_CMD_SET_CONF_IND;
637 ccw->flags = 0;
638 ccw->count = sizeof(vcdev->indicators2);
639 ccw->cda = (__u32)(unsigned long) indicatorp;
640 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
641 if (ret)
642 goto out;
643
644 kfree(indicatorp);
645 kfree(ccw);
646 return 0;
647 out:
648 kfree(indicatorp);
649 kfree(ccw);
650 virtio_ccw_del_vqs(vdev);
651 return ret;
652 }
653
654 static void virtio_ccw_reset(struct virtio_device *vdev)
655 {
656 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
657 struct ccw1 *ccw;
658
659 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
660 if (!ccw)
661 return;
662
663 /* Zero status bits. */
664 *vcdev->status = 0;
665
666 /* Send a reset ccw on device. */
667 ccw->cmd_code = CCW_CMD_VDEV_RESET;
668 ccw->flags = 0;
669 ccw->count = 0;
670 ccw->cda = 0;
671 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
672 kfree(ccw);
673 }
674
675 static u64 virtio_ccw_get_features(struct virtio_device *vdev)
676 {
677 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
678 struct virtio_feature_desc *features;
679 int ret;
680 u64 rc;
681 struct ccw1 *ccw;
682
683 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
684 if (!ccw)
685 return 0;
686
687 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
688 if (!features) {
689 rc = 0;
690 goto out_free;
691 }
692 /* Read the feature bits from the host. */
693 features->index = 0;
694 ccw->cmd_code = CCW_CMD_READ_FEAT;
695 ccw->flags = 0;
696 ccw->count = sizeof(*features);
697 ccw->cda = (__u32)(unsigned long)features;
698 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
699 if (ret) {
700 rc = 0;
701 goto out_free;
702 }
703
704 rc = le32_to_cpu(features->features);
705
706 /* Read second half of the feature bits from the host. */
707 features->index = 1;
708 ccw->cmd_code = CCW_CMD_READ_FEAT;
709 ccw->flags = 0;
710 ccw->count = sizeof(*features);
711 ccw->cda = (__u32)(unsigned long)features;
712 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
713 if (ret == 0)
714 rc |= (u64)le32_to_cpu(features->features) << 32;
715
716 out_free:
717 kfree(features);
718 kfree(ccw);
719 return rc;
720 }
721
722 static void virtio_ccw_finalize_features(struct virtio_device *vdev)
723 {
724 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
725 struct virtio_feature_desc *features;
726 struct ccw1 *ccw;
727
728 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
729 if (!ccw)
730 return;
731
732 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
733 if (!features)
734 goto out_free;
735
736 /* Give virtio_ring a chance to accept features. */
737 vring_transport_features(vdev);
738
739 features->index = 0;
740 features->features = cpu_to_le32((u32)vdev->features);
741 /* Write the first half of the feature bits to the host. */
742 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
743 ccw->flags = 0;
744 ccw->count = sizeof(*features);
745 ccw->cda = (__u32)(unsigned long)features;
746 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
747
748 features->index = 1;
749 features->features = cpu_to_le32(vdev->features >> 32);
750 /* Write the second half of the feature bits to the host. */
751 ccw->cmd_code = CCW_CMD_WRITE_FEAT;
752 ccw->flags = 0;
753 ccw->count = sizeof(*features);
754 ccw->cda = (__u32)(unsigned long)features;
755 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
756
757 out_free:
758 kfree(features);
759 kfree(ccw);
760 }
761
762 static void virtio_ccw_get_config(struct virtio_device *vdev,
763 unsigned int offset, void *buf, unsigned len)
764 {
765 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
766 int ret;
767 struct ccw1 *ccw;
768 void *config_area;
769
770 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
771 if (!ccw)
772 return;
773
774 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
775 if (!config_area)
776 goto out_free;
777
778 /* Read the config area from the host. */
779 ccw->cmd_code = CCW_CMD_READ_CONF;
780 ccw->flags = 0;
781 ccw->count = offset + len;
782 ccw->cda = (__u32)(unsigned long)config_area;
783 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
784 if (ret)
785 goto out_free;
786
787 memcpy(vcdev->config, config_area, sizeof(vcdev->config));
788 memcpy(buf, &vcdev->config[offset], len);
789
790 out_free:
791 kfree(config_area);
792 kfree(ccw);
793 }
794
795 static void virtio_ccw_set_config(struct virtio_device *vdev,
796 unsigned int offset, const void *buf,
797 unsigned len)
798 {
799 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
800 struct ccw1 *ccw;
801 void *config_area;
802
803 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
804 if (!ccw)
805 return;
806
807 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
808 if (!config_area)
809 goto out_free;
810
811 memcpy(&vcdev->config[offset], buf, len);
812 /* Write the config area to the host. */
813 memcpy(config_area, vcdev->config, sizeof(vcdev->config));
814 ccw->cmd_code = CCW_CMD_WRITE_CONF;
815 ccw->flags = 0;
816 ccw->count = offset + len;
817 ccw->cda = (__u32)(unsigned long)config_area;
818 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
819
820 out_free:
821 kfree(config_area);
822 kfree(ccw);
823 }
824
825 static u8 virtio_ccw_get_status(struct virtio_device *vdev)
826 {
827 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
828
829 return *vcdev->status;
830 }
831
832 static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
833 {
834 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
835 struct ccw1 *ccw;
836
837 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
838 if (!ccw)
839 return;
840
841 /* Write the status to the host. */
842 *vcdev->status = status;
843 ccw->cmd_code = CCW_CMD_WRITE_STATUS;
844 ccw->flags = 0;
845 ccw->count = sizeof(status);
846 ccw->cda = (__u32)(unsigned long)vcdev->status;
847 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
848 kfree(ccw);
849 }
850
851 static struct virtio_config_ops virtio_ccw_config_ops = {
852 .get_features = virtio_ccw_get_features,
853 .finalize_features = virtio_ccw_finalize_features,
854 .get = virtio_ccw_get_config,
855 .set = virtio_ccw_set_config,
856 .get_status = virtio_ccw_get_status,
857 .set_status = virtio_ccw_set_status,
858 .reset = virtio_ccw_reset,
859 .find_vqs = virtio_ccw_find_vqs,
860 .del_vqs = virtio_ccw_del_vqs,
861 };
862
863
864 /*
865 * ccw bus driver related functions
866 */
867
868 static void virtio_ccw_release_dev(struct device *_d)
869 {
870 struct virtio_device *dev = container_of(_d, struct virtio_device,
871 dev);
872 struct virtio_ccw_device *vcdev = to_vc_device(dev);
873
874 kfree(vcdev->status);
875 kfree(vcdev->config_block);
876 kfree(vcdev);
877 }
878
879 static int irb_is_error(struct irb *irb)
880 {
881 if (scsw_cstat(&irb->scsw) != 0)
882 return 1;
883 if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
884 return 1;
885 if (scsw_cc(&irb->scsw) != 0)
886 return 1;
887 return 0;
888 }
889
890 static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
891 int index)
892 {
893 struct virtio_ccw_vq_info *info;
894 unsigned long flags;
895 struct virtqueue *vq;
896
897 vq = NULL;
898 spin_lock_irqsave(&vcdev->lock, flags);
899 list_for_each_entry(info, &vcdev->virtqueues, node) {
900 if (info->vq->index == index) {
901 vq = info->vq;
902 break;
903 }
904 }
905 spin_unlock_irqrestore(&vcdev->lock, flags);
906 return vq;
907 }
908
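/*
 * Interrupt handler for the ccw device: completes pending channel I/O
 * (matched via the intparm), kicks virtqueues flagged in the classic
 * per-device indicators and forwards config-change notifications signalled
 * via indicators2.
 */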
909 static void virtio_ccw_int_handler(struct ccw_device *cdev,
910 unsigned long intparm,
911 struct irb *irb)
912 {
913 __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
914 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
915 int i;
916 struct virtqueue *vq;
917
918 if (!vcdev)
919 return;
920 /* Check if it's a notification from the host. */
921 if ((intparm == 0) &&
922 (scsw_stctl(&irb->scsw) ==
923 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
924 /* OK */
925 }
926 if (irb_is_error(irb)) {
927 /* Command reject? */
928 if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
929 (irb->ecw[0] & SNS0_CMD_REJECT))
930 vcdev->err = -EOPNOTSUPP;
931 else
932 /* Map everything else to -EIO. */
933 vcdev->err = -EIO;
934 }
935 if (vcdev->curr_io & activity) {
936 switch (activity) {
937 case VIRTIO_CCW_DOING_READ_FEAT:
938 case VIRTIO_CCW_DOING_WRITE_FEAT:
939 case VIRTIO_CCW_DOING_READ_CONFIG:
940 case VIRTIO_CCW_DOING_WRITE_CONFIG:
941 case VIRTIO_CCW_DOING_WRITE_STATUS:
942 case VIRTIO_CCW_DOING_SET_VQ:
943 case VIRTIO_CCW_DOING_SET_IND:
944 case VIRTIO_CCW_DOING_SET_CONF_IND:
945 case VIRTIO_CCW_DOING_RESET:
946 case VIRTIO_CCW_DOING_READ_VQ_CONF:
947 case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
948 case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
949 vcdev->curr_io &= ~activity;
950 wake_up(&vcdev->wait_q);
951 break;
952 default:
953 /* don't know what to do... */
954 dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
955 activity);
956 WARN_ON(1);
957 break;
958 }
959 }
960 for_each_set_bit(i, &vcdev->indicators,
961 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
962 /* The bit clear must happen before the vring kick. */
963 clear_bit(i, &vcdev->indicators);
964 barrier();
965 vq = virtio_ccw_vq_by_ind(vcdev, i);
966 vring_interrupt(0, vq);
967 }
968 if (test_bit(0, &vcdev->indicators2)) {
969 virtio_config_changed(&vcdev->vdev);
970 clear_bit(0, &vcdev->indicators2);
971 }
972 }
973
974 /*
975 * We usually want to auto-online all devices, but give the admin
976 * a way to exempt devices from this.
977 */
978 #define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
979 (8*sizeof(long)))
980 static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];
981
982 static char *no_auto = "";
983
984 module_param(no_auto, charp, 0444);
985 MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
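/*
 * Bus ids are given as <cssid>.<ssid>.<devno>; single ids and ranges may be
 * mixed, e.g. (illustrative ids only):
 *	virtio_ccw.no_auto=0.0.0042,0.1.0100-0.1.01ff
 */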
986
987 static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
988 {
989 struct ccw_dev_id id;
990
991 ccw_device_get_id(cdev, &id);
992 if (test_bit(id.devno, devs_no_auto[id.ssid]))
993 return 0;
994 return 1;
995 }
996
997 static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
998 {
999 struct ccw_device *cdev = data;
1000 int ret;
1001
1002 ret = ccw_device_set_online(cdev);
1003 if (ret)
1004 dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
1005 }
1006
1007 static int virtio_ccw_probe(struct ccw_device *cdev)
1008 {
1009 cdev->handler = virtio_ccw_int_handler;
1010
1011 if (virtio_ccw_check_autoonline(cdev))
1012 async_schedule(virtio_ccw_auto_online, cdev);
1013 return 0;
1014 }
1015
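/*
 * Fetch the per-device data and mark it as going away under the ccw device
 * lock, so that only one of the remove/set_offline paths tears down the
 * virtio device.
 */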
1016 static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
1017 {
1018 unsigned long flags;
1019 struct virtio_ccw_device *vcdev;
1020
1021 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1022 vcdev = dev_get_drvdata(&cdev->dev);
1023 if (!vcdev || vcdev->going_away) {
1024 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1025 return NULL;
1026 }
1027 vcdev->going_away = true;
1028 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1029 return vcdev;
1030 }
1031
1032 static void virtio_ccw_remove(struct ccw_device *cdev)
1033 {
1034 unsigned long flags;
1035 struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
1036
1037 if (vcdev && cdev->online) {
1038 if (vcdev->device_lost)
1039 virtio_break_device(&vcdev->vdev);
1040 unregister_virtio_device(&vcdev->vdev);
1041 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1042 dev_set_drvdata(&cdev->dev, NULL);
1043 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1044 }
1045 cdev->handler = NULL;
1046 }
1047
1048 static int virtio_ccw_offline(struct ccw_device *cdev)
1049 {
1050 unsigned long flags;
1051 struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
1052
1053 if (!vcdev)
1054 return 0;
1055 if (vcdev->device_lost)
1056 virtio_break_device(&vcdev->vdev);
1057 unregister_virtio_device(&vcdev->vdev);
1058 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1059 dev_set_drvdata(&cdev->dev, NULL);
1060 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1061 return 0;
1062 }
1063
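/*
 * Negotiate the transport revision with the host: start at
 * VIRTIO_CCW_REV_MAX and step down on every command reject; if even
 * revision 0 is rejected, the host predates revision support and the
 * device is driven in legacy mode.
 */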
1064 static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
1065 {
1066 struct virtio_rev_info *rev;
1067 struct ccw1 *ccw;
1068 int ret;
1069
1070 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
1071 if (!ccw)
1072 return -ENOMEM;
1073 rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
1074 if (!rev) {
1075 kfree(ccw);
1076 return -ENOMEM;
1077 }
1078
1079 /* Set transport revision */
1080 ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
1081 ccw->flags = 0;
1082 ccw->count = sizeof(*rev);
1083 ccw->cda = (__u32)(unsigned long)rev;
1084
1085 vcdev->revision = VIRTIO_CCW_REV_MAX;
1086 do {
1087 rev->revision = vcdev->revision;
1088 /* none of our supported revisions carry payload */
1089 rev->length = 0;
1090 ret = ccw_io_helper(vcdev, ccw,
1091 VIRTIO_CCW_DOING_SET_VIRTIO_REV);
1092 if (ret == -EOPNOTSUPP) {
1093 if (vcdev->revision == 0)
1094 /*
1095 * The host device does not support setting
1096 * the revision: let's operate it in legacy
1097 * mode.
1098 */
1099 ret = 0;
1100 else
1101 vcdev->revision--;
1102 }
1103 } while (ret == -EOPNOTSUPP);
1104
1105 kfree(ccw);
1106 kfree(rev);
1107 return ret;
1108 }
1109
1110 static int virtio_ccw_online(struct ccw_device *cdev)
1111 {
1112 int ret;
1113 struct virtio_ccw_device *vcdev;
1114 unsigned long flags;
1115
1116 vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
1117 if (!vcdev) {
1118 dev_warn(&cdev->dev, "Could not get memory for virtio\n");
1119 ret = -ENOMEM;
1120 goto out_free;
1121 }
1122 vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
1123 GFP_DMA | GFP_KERNEL);
1124 if (!vcdev->config_block) {
1125 ret = -ENOMEM;
1126 goto out_free;
1127 }
1128 vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
1129 if (!vcdev->status) {
1130 ret = -ENOMEM;
1131 goto out_free;
1132 }
1133
1134 vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
1135
1136 vcdev->vdev.dev.parent = &cdev->dev;
1137 vcdev->vdev.dev.release = virtio_ccw_release_dev;
1138 vcdev->vdev.config = &virtio_ccw_config_ops;
1139 vcdev->cdev = cdev;
1140 init_waitqueue_head(&vcdev->wait_q);
1141 INIT_LIST_HEAD(&vcdev->virtqueues);
1142 spin_lock_init(&vcdev->lock);
1143
1144 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1145 dev_set_drvdata(&cdev->dev, vcdev);
1146 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1147 vcdev->vdev.id.vendor = cdev->id.cu_type;
1148 vcdev->vdev.id.device = cdev->id.cu_model;
1149
1150 ret = virtio_ccw_set_transport_rev(vcdev);
1151 if (ret)
1152 goto out_free;
1153
1154 ret = register_virtio_device(&vcdev->vdev);
1155 if (ret) {
1156 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
1157 ret);
1158 goto out_put;
1159 }
1160 return 0;
1161 out_put:
1162 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1163 dev_set_drvdata(&cdev->dev, NULL);
1164 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1165 put_device(&vcdev->vdev.dev);
1166 return ret;
1167 out_free:
1168 if (vcdev) {
1169 kfree(vcdev->status);
1170 kfree(vcdev->config_block);
1171 }
1172 kfree(vcdev);
1173 return ret;
1174 }
1175
1176 static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
1177 {
1178 int rc;
1179 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
1180
1181 /*
1182 * Make sure vcdev is still set, i.e. the set_offline/remove
1183 * callbacks are not already running.
1184 */
1185 if (!vcdev)
1186 return NOTIFY_DONE;
1187
1188 switch (event) {
1189 case CIO_GONE:
1190 vcdev->device_lost = true;
1191 rc = NOTIFY_DONE;
1192 break;
1193 default:
1194 rc = NOTIFY_DONE;
1195 break;
1196 }
1197 return rc;
1198 }
1199
1200 static struct ccw_device_id virtio_ids[] = {
1201 { CCW_DEVICE(0x3832, 0) },
1202 {},
1203 };
1204 MODULE_DEVICE_TABLE(ccw, virtio_ids);
1205
1206 static struct ccw_driver virtio_ccw_driver = {
1207 .driver = {
1208 .owner = THIS_MODULE,
1209 .name = "virtio_ccw",
1210 },
1211 .ids = virtio_ids,
1212 .probe = virtio_ccw_probe,
1213 .remove = virtio_ccw_remove,
1214 .set_offline = virtio_ccw_offline,
1215 .set_online = virtio_ccw_online,
1216 .notify = virtio_ccw_cio_notify,
1217 .int_class = IRQIO_VIR,
1218 };
1219
1220 static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
1221 int max_digit, int max_val)
1222 {
1223 int diff;
1224
1225 diff = 0;
1226 *val = 0;
1227
1228 while (diff <= max_digit) {
1229 int value = hex_to_bin(**cp);
1230
1231 if (value < 0)
1232 break;
1233 *val = *val * 16 + value;
1234 (*cp)++;
1235 diff++;
1236 }
1237
1238 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
1239 return 1;
1240
1241 return 0;
1242 }
1243
1244 static int __init parse_busid(char *str, unsigned int *cssid,
1245 unsigned int *ssid, unsigned int *devno)
1246 {
1247 char *str_work;
1248 int rc, ret;
1249
1250 rc = 1;
1251
1252 if (*str == '\0')
1253 goto out;
1254
1255 str_work = str;
1256 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
1257 if (ret || (str_work[0] != '.'))
1258 goto out;
1259 str_work++;
1260 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
1261 if (ret || (str_work[0] != '.'))
1262 goto out;
1263 str_work++;
1264 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
1265 if (ret || (str_work[0] != '\0'))
1266 goto out;
1267
1268 rc = 0;
1269 out:
1270 return rc;
1271 }
1272
1273 static void __init no_auto_parse(void)
1274 {
1275 unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
1276 char *parm, *str;
1277 int rc;
1278
1279 str = no_auto;
1280 while ((parm = strsep(&str, ","))) {
1281 rc = parse_busid(strsep(&parm, "-"), &from_cssid,
1282 &from_ssid, &from);
1283 if (rc)
1284 continue;
1285 if (parm != NULL) {
1286 rc = parse_busid(parm, &to_cssid,
1287 &to_ssid, &to);
1288 if ((from_ssid > to_ssid) ||
1289 ((from_ssid == to_ssid) && (from > to)))
1290 rc = -EINVAL;
1291 } else {
1292 to_cssid = from_cssid;
1293 to_ssid = from_ssid;
1294 to = from;
1295 }
1296 if (rc)
1297 continue;
1298 while ((from_ssid < to_ssid) ||
1299 ((from_ssid == to_ssid) && (from <= to))) {
1300 set_bit(from, devs_no_auto[from_ssid]);
1301 from++;
1302 if (from > __MAX_SUBCHANNEL) {
1303 from_ssid++;
1304 from = 0;
1305 }
1306 }
1307 }
1308 }
1309
1310 static int __init virtio_ccw_init(void)
1311 {
1312 /* parse no_auto string before we do anything further */
1313 no_auto_parse();
1314 return ccw_driver_register(&virtio_ccw_driver);
1315 }
1316 module_init(virtio_ccw_init);
1317
1318 static void __exit virtio_ccw_exit(void)
1319 {
1320 int i;
1321
1322 ccw_driver_unregister(&virtio_ccw_driver);
1323 for (i = 0; i < MAX_AIRQ_AREAS; i++)
1324 destroy_airq_info(airq_areas[i]);
1325 }
1326 module_exit(virtio_ccw_exit);