powerpc/spapr: vfio: Replace iommu_table with iommu_table_group
drivers/vfio/vfio_iommu_spapr_tce.c
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group);
static long try_increment_locked_vm(long npages)
{
        long ret = 0, locked, lock_limit;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        if (!npages)
                return 0;

        down_write(&current->mm->mmap_sem);
        locked = current->mm->locked_vm + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                ret = -ENOMEM;
        else
                current->mm->locked_vm += npages;

        pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
                        npages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}

static void decrement_locked_vm(long npages)
{
        if (!current || !current->mm || !npages)
                return; /* process exited */

        down_write(&current->mm->mmap_sem);
        if (WARN_ON_ONCE(npages > current->mm->locked_vm))
                npages = current->mm->locked_vm;
        current->mm->locked_vm -= npages;
        pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
                        npages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&current->mm->mmap_sem);
}

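/*
 * Worked example (editorial addition, illustrative only): with a 4K
 * PAGE_SIZE and RLIMIT_MEMLOCK set to 64K, the limit works out as
 *
 *      rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT == 65536 >> 12 == 16 pages
 *
 * so try_increment_locked_vm(16) succeeds and any further increment
 * fails with -ENOMEM unless the task has CAP_IPC_LOCK. Exact numbers
 * depend on PAGE_SHIFT and the process rlimit.
 */
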
/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU.
 */

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
        struct mutex lock;
        struct iommu_table *tbl;
        bool enabled;
        unsigned long locked_pages;
};

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
        /*
         * Check that the TCE table granularity is not bigger than the size of
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk than it should.
         */
        return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

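/*
 * Worked example (editorial addition, illustrative only): with a 4K system
 * PAGE_SIZE, a TCE table using 64K IOMMU pages (page_shift == 16) rejects
 * an ordinary 4K page (PAGE_SHIFT + 0 == 12 < 16) but accepts a 16M
 * compound huge page (PAGE_SHIFT + compound_order == 12 + 12 == 24 >= 16).
 */
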
static int tce_iommu_enable(struct tce_container *container)
{
        int ret = 0;
        unsigned long locked;
        struct iommu_table *tbl = container->tbl;

        if (!container->tbl)
                return -ENXIO;

        if (!current->mm)
                return -ESRCH; /* process exited */

        if (container->enabled)
                return -EBUSY;

        /*
         * When userspace pages are mapped into the IOMMU, they are effectively
         * locked memory, so, theoretically, we need to update the accounting
         * of locked pages on each map and unmap. For powerpc, the map/unmap
         * paths can be very hot, though, and the accounting would kill
         * performance, especially since it would be difficult or impossible
         * to handle the accounting in real mode only.
         *
         * To address that, rather than precisely accounting every page, we
         * instead account for a worst case on locked memory when the iommu is
         * enabled and disabled. The worst-case upper bound on locked memory
         * is the size of the whole iommu window, which is usually relatively
         * small (compared to total memory sizes) on POWER hardware.
         *
         * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
         * that would effectively kill the guest at random points, so it is
         * much better to enforce the limit based on the maximum that the
         * guest can map.
         *
         * Unfortunately, at the moment it counts whole tables, no matter how
         * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
         * each with a 2GB DMA window, 8GB will be counted here. The reason for
         * this is that we cannot tell here the amount of RAM used by the guest
         * as this information is only available from KVM and VFIO is
         * KVM agnostic.
         */
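        /*
         * Worked example (editorial addition, illustrative only): a 2GB DMA
         * window built from 4K IOMMU pages has it_size == 0x80000 entries,
         * so the charge below is (0x80000 << 12) >> PAGE_SHIFT == 524288
         * pages (the full 2GB), independent of how much the guest actually
         * maps.
         */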
        locked = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT;
        ret = try_increment_locked_vm(locked);
        if (ret)
                return ret;

        container->locked_pages = locked;

        container->enabled = true;

        return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
        if (!container->enabled)
                return;

        container->enabled = false;

        if (!current->mm)
                return;

        decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
        struct tce_container *container;

        if (arg != VFIO_SPAPR_TCE_IOMMU) {
                pr_err("tce_vfio: Wrong IOMMU type\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        mutex_init(&container->lock);

        return container;
}

static void tce_iommu_release(void *iommu_data)
{
        struct tce_container *container = iommu_data;

        WARN_ON(container->tbl && !container->tbl->it_table_group->group);

        if (container->tbl && container->tbl->it_table_group->group)
                tce_iommu_detach_group(iommu_data,
                                container->tbl->it_table_group->group);

        tce_iommu_disable(container);
        mutex_destroy(&container->lock);

        kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
                unsigned long oldtce)
{
        struct page *page;

        if (!(oldtce & (TCE_PCI_READ | TCE_PCI_WRITE)))
                return;

        page = pfn_to_page(oldtce >> PAGE_SHIFT);

        if (oldtce & TCE_PCI_WRITE)
                SetPageDirty(page);

        put_page(page);
}

static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldtce;

        for ( ; pages; --pages, ++entry) {
                oldtce = iommu_clear_tce(tbl, entry);
                if (!oldtce)
                        continue;

                tce_iommu_unuse_page(container, oldtce);
        }

        return 0;
}

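/*
 * Editor's note (hedged, not in the original source): iommu_clear_tce()
 * returns the previous TCE value, or 0 when the entry carried no
 * read/write permission, so tce_iommu_clear() above drops the page pin
 * only for entries that were actually mapped.
 */
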
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
        struct page *page = NULL;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        if (get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page) != 1)
                return -EFAULT;

        *hpa = __pa((unsigned long) page_address(page));

        return 0;
}

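/*
 * Editor's note (hedged, not in the original source): tce_iommu_use_page()
 * pins exactly one system page, requesting write access whenever the TCE
 * permits device writes (direction != DMA_TO_DEVICE). For IOMMU pages
 * larger than PAGE_SIZE, the caller relies on tce_page_is_contained() to
 * verify that the pinned page belongs to a compound page big enough to
 * cover the whole TCE.
 */
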
static long tce_iommu_build(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        for (i = 0; i < pages; ++i) {
                unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

                ret = tce_iommu_use_page(tce, &hpa);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                hpa |= offset;
                ret = iommu_tce_build(tbl, entry + i, (unsigned long) __va(hpa),
                                direction);
                if (ret) {
                        tce_iommu_unuse_page(container, hpa);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }
                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

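/*
 * A minimal userspace sketch (editorial addition; error handling omitted,
 * and "group_fd", "container_fd", "buf" and "len" are assumed names, not
 * part of this driver). It shows the call order the ioctl handler below
 * expects: attach the group, select this IOMMU type, enable the container,
 * query the window, then map:
 *
 *      ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);
 *      ioctl(container_fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *      ioctl(container_fd, VFIO_IOMMU_ENABLE);
 *
 *      struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *      ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 *      struct vfio_iommu_type1_dma_map map = {
 *              .argsz = sizeof(map),
 *              .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *              .vaddr = (__u64)(uintptr_t)buf,      (IOMMU-page-aligned)
 *              .iova  = info.dma32_window_start,
 *              .size  = len,            (multiple of the IOMMU page size)
 *      };
 *      ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */
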
static long tce_iommu_ioctl(void *iommu_data,
                unsigned int cmd, unsigned long arg)
{
        struct tce_container *container = iommu_data;
        unsigned long minsz;
        long ret;

        switch (cmd) {
        case VFIO_CHECK_EXTENSION:
                switch (arg) {
                case VFIO_SPAPR_TCE_IOMMU:
                        ret = 1;
                        break;
                default:
                        ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
                        break;
                }

                return (ret < 0) ? 0 : ret;

        case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
                struct vfio_iommu_spapr_tce_info info;
                struct iommu_table *tbl = container->tbl;

                if (WARN_ON(!tbl))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
                                dma32_window_size);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.dma32_window_start = tbl->it_offset << tbl->it_page_shift;
                info.dma32_window_size = tbl->it_size << tbl->it_page_shift;
                info.flags = 0;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_IOMMU_MAP_DMA: {
                struct vfio_iommu_type1_dma_map param;
                struct iommu_table *tbl = container->tbl;
                unsigned long tce;

                if (!container->enabled)
                        return -EPERM;

                if (!tbl)
                        return -ENXIO;

                BUG_ON(!tbl->it_table_group->group);

                minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
                                VFIO_DMA_MAP_FLAG_WRITE))
                        return -EINVAL;

                if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
                                (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
                        return -EINVAL;

                /* iova is checked by the IOMMU API */
                tce = param.vaddr;
                if (param.flags & VFIO_DMA_MAP_FLAG_READ)
                        tce |= TCE_PCI_READ;
                if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                        tce |= TCE_PCI_WRITE;

                ret = iommu_tce_put_param_check(tbl, param.iova, tce);
                if (ret)
                        return ret;

                ret = tce_iommu_build(container, tbl,
                                param.iova >> tbl->it_page_shift,
                                tce, param.size >> tbl->it_page_shift);

                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_UNMAP_DMA: {
                struct vfio_iommu_type1_dma_unmap param;
                struct iommu_table *tbl = container->tbl;

                if (!container->enabled)
                        return -EPERM;

                if (WARN_ON(!tbl))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flags are supported for now */
                if (param.flags)
                        return -EINVAL;

                if (param.size & ~IOMMU_PAGE_MASK(tbl))
                        return -EINVAL;

                ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
                                param.size >> tbl->it_page_shift);
                if (ret)
                        return ret;

                ret = tce_iommu_clear(container, tbl,
                                param.iova >> tbl->it_page_shift,
                                param.size >> tbl->it_page_shift);
                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_ENABLE:
                mutex_lock(&container->lock);
                ret = tce_iommu_enable(container);
                mutex_unlock(&container->lock);
                return ret;

        case VFIO_IOMMU_DISABLE:
                mutex_lock(&container->lock);
                tce_iommu_disable(container);
                mutex_unlock(&container->lock);
                return 0;
        case VFIO_EEH_PE_OP:
                if (!container->tbl || !container->tbl->it_table_group->group)
                        return -ENODEV;

                return vfio_spapr_iommu_eeh_ioctl(
                                container->tbl->it_table_group->group,
                                cmd, arg);
        }

        return -ENOTTY;
}

static int tce_iommu_attach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        int ret;
        struct tce_container *container = iommu_data;
        struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group);

        BUG_ON(!tbl);
        mutex_lock(&container->lock);

        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        if (container->tbl) {
                pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n",
                                iommu_group_id(container->tbl->
                                                it_table_group->group),
                                iommu_group_id(iommu_group));
                ret = -EBUSY;
                goto unlock_exit;
        }

        if (container->enabled) {
                pr_err("tce_vfio: attaching group #%u to enabled container\n",
                                iommu_group_id(iommu_group));
                ret = -EBUSY;
                goto unlock_exit;
        }

        ret = iommu_take_ownership(tbl);
        if (!ret)
                container->tbl = tbl;

unlock_exit:
        mutex_unlock(&container->lock);

        return ret;
}

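/*
 * Editor's note (hedged, not in the original source): attach takes
 * exclusive ownership of the group's TCE table via iommu_take_ownership(),
 * so the table stops being used for in-kernel DMA setup while userspace
 * drives it. Detach below clears whatever TCEs userspace left behind and
 * hands the table back with iommu_release_ownership().
 */
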
static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        struct tce_container *container = iommu_data;
        struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group);

        BUG_ON(!tbl);
        mutex_lock(&container->lock);
        if (tbl != container->tbl) {
                pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n",
                                iommu_group_id(iommu_group),
                                iommu_group_id(tbl->it_table_group->group));
                goto unlock_exit;
        }

        if (container->enabled) {
                pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n",
                                iommu_group_id(tbl->it_table_group->group));
                tce_iommu_disable(container);
        }

        /* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        container->tbl = NULL;
        tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
        iommu_release_ownership(tbl);

unlock_exit:
        mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
        .release        = tce_iommu_release,
        .ioctl          = tce_iommu_ioctl,
        .attach_group   = tce_iommu_attach_group,
        .detach_group   = tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
        return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
        vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);