drivers/iommu/omap-iommu.c

/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

#define to_iommu(dev)	\
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 * @domain:	generic domain handle used by the iommu core code
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
	struct iommu_domain domain;
};

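/*
 * The MMU_LOCK register holds two fields: "base", the number of locked
 * (preserved) TLB entries at the bottom of the TLB, and "vict", the index
 * of the entry that the next TLB load will overwrite.
 */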
#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom:	generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

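/*
 * On DRA7, the DSP MMUs also need an enable bit set in the DSP_SYSTEM
 * syscon module; obj->syscfg is only populated for "ti,dra7-dsp-iommu"
 * instances (see omap_iommu_dra7_get_dsp_system_cfg() below), so this is
 * a no-op for all other IOMMUs.
 */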
static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

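/*
 * The hardware walks the L1 table from the physical address programmed
 * into MMU_TTB, which requires the table to be 16 KiB aligned; refuse to
 * enable the MMU with a missing or misaligned page table.
 */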
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = omap2_iommu_enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	omap2_iommu_disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

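/*
 * Pack the endianness, element size and mixed-page attributes of an
 * iotlb_entry into descriptor attribute bits.  Section/supersection
 * descriptors (1M/16M) carry these bits 6 positions higher than
 * small/large page descriptors (4K/64K), hence the conditional shift.
 */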
static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
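/*
 * The IOMMU hardware fetches page table entries directly from memory, so
 * every CPU update to a descriptor must be cleaned out of the L1 data
 * cache before the IOMMU can see it; the "mcr p15, 0, %0, c7, c10, 1"
 * below is the ARM "clean data cache line by MVA" operation.
 */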
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte)
		kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

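/*
 * Clear the descriptor(s) mapping 'da' and return the number of bytes
 * that were unmapped (the size of the section/supersection or
 * small/large page that the entry covered).
 */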
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
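/*
 * MMU fault ISR: read and acknowledge the fault status, give the attached
 * domain's fault handler a chance to resolve the fault via
 * report_iommu_fault(), and otherwise disable the MMU and dump the
 * offending PGD/PTE entries before bailing out.
 */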
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->iommu_dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_enable:
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	iommu_disable(obj);
	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *of = pdev->dev.of_node;

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (of) {
		obj->name = dev_name(&pdev->dev);
		obj->nr_tlb_entries = 32;
		err = of_property_read_u32(of, "ti,#tlb-entries",
					   &obj->nr_tlb_entries);
		if (err && err != -EINVAL)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
	} else {
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
	}

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	iopgtable_clear_entry_all(obj);
	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu"	},
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}

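/*
 * The IOMMU core splits arbitrary map/unmap requests into chunks matching
 * the page sizes advertised in omap_iommu_ops.pgsize_bitmap, so each call
 * below deals with exactly one 4K, 64K, 1M or 16M region.
 */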
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	if (!arch_data || !arch_data->name) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
	oiommu->domain = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		goto out;

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable)
		goto fail_nomem;

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;

fail_nomem:
	kfree(omap_domain);
out:
	return NULL;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * Is an iommu device still attached?
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct device_node *np;
	struct platform_device *pdev;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
	dev->archdata.iommu = arch_data;

	of_node_put(np);

	return 0;
}

static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data->name);
	kfree(arch_data);
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	omap_iommu_debugfs_init();

	return platform_driver_register(&omap_iommu_driver);
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */