drivers/iommu/exynos-iommu.c
1 /*
2 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10 #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
11 #define DEBUG
12 #endif
13
14 #include <linux/clk.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/err.h>
17 #include <linux/io.h>
18 #include <linux/iommu.h>
19 #include <linux/interrupt.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/of_iommu.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/slab.h>
27 #include <linux/dma-iommu.h>
28
29 typedef u32 sysmmu_iova_t;
30 typedef u32 sysmmu_pte_t;
31
32 /* We do not consider super section mapping (16MB) */
33 #define SECT_ORDER 20
34 #define LPAGE_ORDER 16
35 #define SPAGE_ORDER 12
36
37 #define SECT_SIZE (1 << SECT_ORDER)
38 #define LPAGE_SIZE (1 << LPAGE_ORDER)
39 #define SPAGE_SIZE (1 << SPAGE_ORDER)
40
41 #define SECT_MASK (~(SECT_SIZE - 1))
42 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
43 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
44
45 #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
46 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
47 #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
48 #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
49 #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
50 ((*(sent) & 3) == 1))
51 #define lv1ent_section(sent) ((*(sent) & 3) == 2)
52
53 #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
54 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
55 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
56
57 /*
58 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
59 * v5.0 introduced support for 36bit physical address space by shifting
60 * all page entry values by 4 bits.
61 * All SYSMMU controllers in the system support the address spaces of the same
62 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
63 * value (0 or 4).
64 */
65 static short PG_ENT_SHIFT = -1;
66 #define SYSMMU_PG_ENT_SHIFT 0
67 #define SYSMMU_V5_PG_ENT_SHIFT 4
68
69 #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
70 #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
71 #define section_offs(iova) (iova & (SECT_SIZE - 1))
72 #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
73 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
74 #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
75 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
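/*
 * Worked example (illustrative): on a v5 SYSMMU, PG_ENT_SHIFT is 4, so a
 * section entry holding 0x01234562 yields sect_to_phys() == 0x12345620,
 * section_phys() == 0x12300000, and the low 20 bits of the IOVA are added
 * back by section_offs() to form the final 36-bit physical address.
 */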
76
77 #define NUM_LV1ENTRIES 4096
78 #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
79
80 static u32 lv1ent_offset(sysmmu_iova_t iova)
81 {
82 return iova >> SECT_ORDER;
83 }
84
85 static u32 lv2ent_offset(sysmmu_iova_t iova)
86 {
87 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
88 }
89
90 #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
91 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
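/*
 * Size note (derived from the macros above): 4096 lv1 entries of 4 bytes give
 * a 16KiB first-level table, and the 256 lv2 entries covering one 1MiB
 * section give a 1KiB second-level table.
 */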
92
93 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
94 #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
95
96 #define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
97 #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
98 #define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
99 #define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
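/*
 * Worked example (illustrative): with PG_ENT_SHIFT == 4 (v5),
 * mk_lv1ent_sect(0x40000000) == 0x04000002, i.e. the physical section base
 * scaled down by 4 bits with the low two bits set to the "section" type.
 */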
100
101 #define CTRL_ENABLE 0x5
102 #define CTRL_BLOCK 0x7
103 #define CTRL_DISABLE 0x0
104
105 #define CFG_LRU 0x1
106 #define CFG_QOS(n) ((n & 0xF) << 7)
107 #define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
108 #define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
109 #define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
110
111 /* common registers */
112 #define REG_MMU_CTRL 0x000
113 #define REG_MMU_CFG 0x004
114 #define REG_MMU_STATUS 0x008
115 #define REG_MMU_VERSION 0x034
116
117 #define MMU_MAJ_VER(val) ((val) >> 7)
118 #define MMU_MIN_VER(val) ((val) & 0x7F)
119 #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
120
121 #define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
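/*
 * Worked example (illustrative): a REG_MMU_VERSION readout of 0x30600000
 * gives MMU_RAW_VER() == 0x183, which MMU_MAJ_VER()/MMU_MIN_VER() decode as
 * version 3.3, the same value MAKE_MMU_VER(3, 3) produces.
 */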
122
123 /* v1.x - v3.x registers */
124 #define REG_MMU_FLUSH 0x00C
125 #define REG_MMU_FLUSH_ENTRY 0x010
126 #define REG_PT_BASE_ADDR 0x014
127 #define REG_INT_STATUS 0x018
128 #define REG_INT_CLEAR 0x01C
129
130 #define REG_PAGE_FAULT_ADDR 0x024
131 #define REG_AW_FAULT_ADDR 0x028
132 #define REG_AR_FAULT_ADDR 0x02C
133 #define REG_DEFAULT_SLAVE_ADDR 0x030
134
135 /* v5.x registers */
136 #define REG_V5_PT_BASE_PFN 0x00C
137 #define REG_V5_MMU_FLUSH_ALL 0x010
138 #define REG_V5_MMU_FLUSH_ENTRY 0x014
139 #define REG_V5_INT_STATUS 0x060
140 #define REG_V5_INT_CLEAR 0x064
141 #define REG_V5_FAULT_AR_VA 0x070
142 #define REG_V5_FAULT_AW_VA 0x080
143
144 #define has_sysmmu(dev) (dev->archdata.iommu != NULL)
145
146 static struct device *dma_dev;
147 static struct kmem_cache *lv2table_kmem_cache;
148 static sysmmu_pte_t *zero_lv2_table;
149 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
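/*
 * zero_lv2_table is a single shared lv2 table whose entries are all faults.
 * ZERO_LV2LINK turns it into an lv1 "page" entry, which unused sections point
 * at instead of a plain fault entry; see the System MMU v3.3 FLPD cache
 * workaround comments below.
 */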
150
151 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
152 {
153 return pgtable + lv1ent_offset(iova);
154 }
155
156 static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
157 {
158 return (sysmmu_pte_t *)phys_to_virt(
159 lv2table_base(sent)) + lv2ent_offset(iova);
160 }
161
162 /*
163 * IOMMU fault information register
164 */
165 struct sysmmu_fault_info {
166 unsigned int bit; /* bit number in STATUS register */
167 unsigned short addr_reg; /* register to read VA fault address */
168 const char *name; /* human readable fault name */
169 unsigned int type; /* fault type for report_iommu_fault */
170 };
171
172 static const struct sysmmu_fault_info sysmmu_faults[] = {
173 { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
174 { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
175 { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
176 { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
177 { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
178 { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
179 { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
180 { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
181 };
182
183 static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
184 { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
185 { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
186 { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
187 { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
188 { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
189 { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
190 { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
191 { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
192 { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
193 { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
194 };
195
196 /*
197 * This structure is attached to dev.archdata.iommu of the master device
198 * on device add. It contains a list of the SYSMMU controllers defined in the
199 * device tree that are bound to the given master device. It is usually
200 * referenced by the 'owner' pointer.
201 */
202 struct exynos_iommu_owner {
203 struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
204 struct iommu_domain *domain; /* domain this device is attached */
205 };
206
207 /*
208 * This structure is the exynos-specific generalization of struct iommu_domain.
209 * It contains the list of SYSMMU controllers of all master devices that have
210 * been attached to this domain, and the page tables of the I/O address space
211 * defined by it. It is usually referenced by the 'domain' pointer.
212 */
213 struct exynos_iommu_domain {
214 struct list_head clients; /* list of sysmmu_drvdata.domain_node */
215 sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
216 short *lv2entcnt; /* free lv2 entry counter for each section */
217 spinlock_t lock; /* lock for modifying list of clients */
218 spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
219 struct iommu_domain domain; /* generic domain data structure */
220 };
221
222 /*
223 * This structure holds all data of a single SYSMMU controller. This includes
224 * hw resources like registers and clocks, pointers and list nodes connecting
225 * it to all other structures, internal state, and parameters read from the
226 * device tree. It is usually referenced by the 'data' pointer.
227 */
228 struct sysmmu_drvdata {
229 struct device *sysmmu; /* SYSMMU controller device */
230 struct device *master; /* master device (owner) */
231 void __iomem *sfrbase; /* our registers */
232 struct clk *clk; /* SYSMMU's clock */
233 struct clk *aclk; /* SYSMMU's aclk clock */
234 struct clk *pclk; /* SYSMMU's pclk clock */
235 struct clk *clk_master; /* master's device clock */
236 int activations; /* number of calls to sysmmu_enable */
237 spinlock_t lock; /* lock for modifying state */
238 struct exynos_iommu_domain *domain; /* domain we belong to */
239 struct list_head domain_node; /* node for domain clients list */
240 struct list_head owner_node; /* node for owner controllers list */
241 phys_addr_t pgtable; /* assigned page table structure */
242 unsigned int version; /* our version */
243 };
244
245 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
246 {
247 return container_of(dom, struct exynos_iommu_domain, domain);
248 }
249
250 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
251 {
252 /* return true if the System MMU was not active previously
253 and it needs to be initialized */
254 return ++data->activations == 1;
255 }
256
257 static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
258 {
259 /* return true if the System MMU needs to be disabled */
260 BUG_ON(data->activations < 1);
261 return --data->activations == 0;
262 }
263
264 static bool is_sysmmu_active(struct sysmmu_drvdata *data)
265 {
266 return data->activations > 0;
267 }
268
269 static void sysmmu_unblock(struct sysmmu_drvdata *data)
270 {
271 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
272 }
273
274 static bool sysmmu_block(struct sysmmu_drvdata *data)
275 {
276 int i = 120;
277
278 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
279 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
280 --i;
281
282 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
283 sysmmu_unblock(data);
284 return false;
285 }
286
287 return true;
288 }
289
290 static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
291 {
292 if (MMU_MAJ_VER(data->version) < 5)
293 writel(0x1, data->sfrbase + REG_MMU_FLUSH);
294 else
295 writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
296 }
297
298 static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
299 sysmmu_iova_t iova, unsigned int num_inv)
300 {
301 unsigned int i;
302
303 for (i = 0; i < num_inv; i++) {
304 if (MMU_MAJ_VER(data->version) < 5)
305 writel((iova & SPAGE_MASK) | 1,
306 data->sfrbase + REG_MMU_FLUSH_ENTRY);
307 else
308 writel((iova & SPAGE_MASK) | 1,
309 data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
310 iova += SPAGE_SIZE;
311 }
312 }
313
314 static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
315 {
316 if (MMU_MAJ_VER(data->version) < 5)
317 writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
318 else
319 writel(pgd >> PAGE_SHIFT,
320 data->sfrbase + REG_V5_PT_BASE_PFN);
321
322 __sysmmu_tlb_invalidate(data);
323 }
324
325 static void __sysmmu_get_version(struct sysmmu_drvdata *data)
326 {
327 u32 ver;
328
329 clk_enable(data->clk_master);
330 clk_enable(data->clk);
331 clk_enable(data->pclk);
332 clk_enable(data->aclk);
333
334 ver = readl(data->sfrbase + REG_MMU_VERSION);
335
336 /* controllers on some SoCs don't report proper version */
337 if (ver == 0x80000001u)
338 data->version = MAKE_MMU_VER(1, 0);
339 else
340 data->version = MMU_RAW_VER(ver);
341
342 dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
343 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
344
345 clk_disable(data->aclk);
346 clk_disable(data->pclk);
347 clk_disable(data->clk);
348 clk_disable(data->clk_master);
349 }
350
351 static void show_fault_information(struct sysmmu_drvdata *data,
352 const struct sysmmu_fault_info *finfo,
353 sysmmu_iova_t fault_addr)
354 {
355 sysmmu_pte_t *ent;
356
357 dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
358 finfo->name, fault_addr, &data->pgtable);
359 ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
360 dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
361 if (lv1ent_page(ent)) {
362 ent = page_entry(ent, fault_addr);
363 dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
364 }
365 }
366
367 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
368 {
369 /* SYSMMU is in blocked state when an interrupt occurs. */
370 struct sysmmu_drvdata *data = dev_id;
371 const struct sysmmu_fault_info *finfo;
372 unsigned int i, n, itype;
373 sysmmu_iova_t fault_addr = -1;
374 unsigned short reg_status, reg_clear;
375 int ret = -ENOSYS;
376
377 WARN_ON(!is_sysmmu_active(data));
378
379 if (MMU_MAJ_VER(data->version) < 5) {
380 reg_status = REG_INT_STATUS;
381 reg_clear = REG_INT_CLEAR;
382 finfo = sysmmu_faults;
383 n = ARRAY_SIZE(sysmmu_faults);
384 } else {
385 reg_status = REG_V5_INT_STATUS;
386 reg_clear = REG_V5_INT_CLEAR;
387 finfo = sysmmu_v5_faults;
388 n = ARRAY_SIZE(sysmmu_v5_faults);
389 }
390
391 spin_lock(&data->lock);
392
393 clk_enable(data->clk_master);
394
395 itype = __ffs(readl(data->sfrbase + reg_status));
396 for (i = 0; i < n; i++, finfo++)
397 if (finfo->bit == itype)
398 break;
399 /* unknown/unsupported fault */
400 BUG_ON(i == n);
401
402 /* print debug message */
403 fault_addr = readl(data->sfrbase + finfo->addr_reg);
404 show_fault_information(data, finfo, fault_addr);
405
406 if (data->domain)
407 ret = report_iommu_fault(&data->domain->domain,
408 data->master, fault_addr, finfo->type);
409 /* fault is not recovered by fault handler */
410 BUG_ON(ret != 0);
411
412 writel(1 << itype, data->sfrbase + reg_clear);
413
414 sysmmu_unblock(data);
415
416 clk_disable(data->clk_master);
417
418 spin_unlock(&data->lock);
419
420 return IRQ_HANDLED;
421 }
422
423 static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
424 {
425 clk_enable(data->clk_master);
426
427 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
428 writel(0, data->sfrbase + REG_MMU_CFG);
429
430 clk_disable(data->aclk);
431 clk_disable(data->pclk);
432 clk_disable(data->clk);
433 clk_disable(data->clk_master);
434 }
435
436 static bool __sysmmu_disable(struct sysmmu_drvdata *data)
437 {
438 bool disabled;
439 unsigned long flags;
440
441 spin_lock_irqsave(&data->lock, flags);
442
443 disabled = set_sysmmu_inactive(data);
444
445 if (disabled) {
446 data->pgtable = 0;
447 data->domain = NULL;
448
449 __sysmmu_disable_nocount(data);
450
451 dev_dbg(data->sysmmu, "Disabled\n");
452 } else {
453 dev_dbg(data->sysmmu, "%d times left to disable\n",
454 data->activations);
455 }
456
457 spin_unlock_irqrestore(&data->lock, flags);
458
459 return disabled;
460 }
461
462 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
463 {
464 unsigned int cfg;
465
466 if (data->version <= MAKE_MMU_VER(3, 1))
467 cfg = CFG_LRU | CFG_QOS(15);
468 else if (data->version <= MAKE_MMU_VER(3, 2))
469 cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
470 else
471 cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
472
473 writel(cfg, data->sfrbase + REG_MMU_CFG);
474 }
475
476 static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
477 {
478 clk_enable(data->clk_master);
479 clk_enable(data->clk);
480 clk_enable(data->pclk);
481 clk_enable(data->aclk);
482
483 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
484
485 __sysmmu_init_config(data);
486
487 __sysmmu_set_ptbase(data, data->pgtable);
488
489 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
490
491 clk_disable(data->clk_master);
492 }
493
494 static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
495 struct exynos_iommu_domain *domain)
496 {
497 int ret = 0;
498 unsigned long flags;
499
500 spin_lock_irqsave(&data->lock, flags);
501 if (set_sysmmu_active(data)) {
502 data->pgtable = pgtable;
503 data->domain = domain;
504
505 __sysmmu_enable_nocount(data);
506
507 dev_dbg(data->sysmmu, "Enabled\n");
508 } else {
509 ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
510
511 dev_dbg(data->sysmmu, "already enabled\n");
512 }
513
514 if (WARN_ON(ret < 0))
515 set_sysmmu_inactive(data); /* decrement count */
516
517 spin_unlock_irqrestore(&data->lock, flags);
518
519 return ret;
520 }
521
522 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
523 sysmmu_iova_t iova)
524 {
525 unsigned long flags;
526
527 clk_enable(data->clk_master);
528
529 spin_lock_irqsave(&data->lock, flags);
530 if (is_sysmmu_active(data)) {
531 if (data->version >= MAKE_MMU_VER(3, 3))
532 __sysmmu_tlb_invalidate_entry(data, iova, 1);
533 }
534 spin_unlock_irqrestore(&data->lock, flags);
535
536 clk_disable(data->clk_master);
537 }
538
539 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
540 sysmmu_iova_t iova, size_t size)
541 {
542 unsigned long flags;
543
544 spin_lock_irqsave(&data->lock, flags);
545 if (is_sysmmu_active(data)) {
546 unsigned int num_inv = 1;
547
548 clk_enable(data->clk_master);
549
550 /*
551 * L2 TLB invalidations required:
552 * 4KB page: 1 invalidation
553 * 64KB page: 16 invalidations
554 * 1MB page: 64 invalidations
555 * because the TLB is 8-way set-associative
556 * with 64 sets.
557 * A 1MB page can be cached in any of the 64 sets;
558 * a 64KB page in one of 16 consecutive sets.
559 */
560 if (MMU_MAJ_VER(data->version) == 2)
561 num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
562
563 if (sysmmu_block(data)) {
564 __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
565 sysmmu_unblock(data);
566 }
567 clk_disable(data->clk_master);
568 } else {
569 dev_dbg(data->master,
570 "disabled. Skipping TLB invalidation @ %#x\n", iova);
571 }
572 spin_unlock_irqrestore(&data->lock, flags);
573 }
574
575 static int __init exynos_sysmmu_probe(struct platform_device *pdev)
576 {
577 int irq, ret;
578 struct device *dev = &pdev->dev;
579 struct sysmmu_drvdata *data;
580 struct resource *res;
581
582 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
583 if (!data)
584 return -ENOMEM;
585
586 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
587 data->sfrbase = devm_ioremap_resource(dev, res);
588 if (IS_ERR(data->sfrbase))
589 return PTR_ERR(data->sfrbase);
590
591 irq = platform_get_irq(pdev, 0);
592 if (irq <= 0) {
593 dev_err(dev, "Unable to find IRQ resource\n");
594 return irq;
595 }
596
597 ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
598 dev_name(dev), data);
599 if (ret) {
600 dev_err(dev, "Unable to register handler for irq %d\n", irq);
601 return ret;
602 }
603
604 data->clk = devm_clk_get(dev, "sysmmu");
605 if (!IS_ERR(data->clk)) {
606 ret = clk_prepare(data->clk);
607 if (ret) {
608 dev_err(dev, "Failed to prepare clk\n");
609 return ret;
610 }
611 } else {
612 data->clk = NULL;
613 }
614
615 data->aclk = devm_clk_get(dev, "aclk");
616 if (!IS_ERR(data->aclk)) {
617 ret = clk_prepare(data->aclk);
618 if (ret) {
619 dev_err(dev, "Failed to prepare aclk\n");
620 return ret;
621 }
622 } else {
623 data->aclk = NULL;
624 }
625
626 data->pclk = devm_clk_get(dev, "pclk");
627 if (!IS_ERR(data->pclk)) {
628 ret = clk_prepare(data->pclk);
629 if (ret) {
630 dev_err(dev, "Failed to prepare pclk\n");
631 return ret;
632 }
633 } else {
634 data->pclk = NULL;
635 }
636
637 if (!data->clk && (!data->aclk || !data->pclk)) {
638 dev_err(dev, "Failed to get device clock(s)!\n");
639 return -ENOSYS;
640 }
641
642 data->clk_master = devm_clk_get(dev, "master");
643 if (!IS_ERR(data->clk_master)) {
644 ret = clk_prepare(data->clk_master);
645 if (ret) {
646 dev_err(dev, "Failed to prepare master's clk\n");
647 return ret;
648 }
649 } else {
650 data->clk_master = NULL;
651 }
652
653 data->sysmmu = dev;
654 spin_lock_init(&data->lock);
655
656 platform_set_drvdata(pdev, data);
657
658 __sysmmu_get_version(data);
659 if (PG_ENT_SHIFT < 0) {
660 if (MMU_MAJ_VER(data->version) < 5)
661 PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
662 else
663 PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
664 }
665
666 pm_runtime_enable(dev);
667
668 return 0;
669 }
670
671 #ifdef CONFIG_PM_SLEEP
672 static int exynos_sysmmu_suspend(struct device *dev)
673 {
674 struct sysmmu_drvdata *data = dev_get_drvdata(dev);
675
676 dev_dbg(dev, "suspend\n");
677 if (is_sysmmu_active(data)) {
678 __sysmmu_disable_nocount(data);
679 pm_runtime_put(dev);
680 }
681 return 0;
682 }
683
684 static int exynos_sysmmu_resume(struct device *dev)
685 {
686 struct sysmmu_drvdata *data = dev_get_drvdata(dev);
687
688 dev_dbg(dev, "resume\n");
689 if (is_sysmmu_active(data)) {
690 pm_runtime_get_sync(dev);
691 __sysmmu_enable_nocount(data);
692 }
693 return 0;
694 }
695 #endif
696
697 static const struct dev_pm_ops sysmmu_pm_ops = {
698 SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
699 };
700
701 static const struct of_device_id sysmmu_of_match[] __initconst = {
702 { .compatible = "samsung,exynos-sysmmu", },
703 { },
704 };
705
706 static struct platform_driver exynos_sysmmu_driver __refdata = {
707 .probe = exynos_sysmmu_probe,
708 .driver = {
709 .name = "exynos-sysmmu",
710 .of_match_table = sysmmu_of_match,
711 .pm = &sysmmu_pm_ops,
712 }
713 };
714
715 static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
716 {
717 dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
718 DMA_TO_DEVICE);
719 *ent = val;
720 dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
721 DMA_TO_DEVICE);
722 }
723
724 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
725 {
726 struct exynos_iommu_domain *domain;
727 dma_addr_t handle;
728 int i;
729
730 /* Check if correct PTE offsets are initialized */
731 BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
732
733 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
734 if (!domain)
735 return NULL;
736
737 if (type == IOMMU_DOMAIN_DMA) {
738 if (iommu_get_dma_cookie(&domain->domain) != 0)
739 goto err_pgtable;
740 } else if (type != IOMMU_DOMAIN_UNMANAGED) {
741 goto err_pgtable;
742 }
743
744 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
745 if (!domain->pgtable)
746 goto err_dma_cookie;
747
748 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
749 if (!domain->lv2entcnt)
750 goto err_counter;
751
752 /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
753 for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
754 domain->pgtable[i + 0] = ZERO_LV2LINK;
755 domain->pgtable[i + 1] = ZERO_LV2LINK;
756 domain->pgtable[i + 2] = ZERO_LV2LINK;
757 domain->pgtable[i + 3] = ZERO_LV2LINK;
758 domain->pgtable[i + 4] = ZERO_LV2LINK;
759 domain->pgtable[i + 5] = ZERO_LV2LINK;
760 domain->pgtable[i + 6] = ZERO_LV2LINK;
761 domain->pgtable[i + 7] = ZERO_LV2LINK;
762 }
763
764 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
765 DMA_TO_DEVICE);
766 /* For mapping page table entries we rely on dma == phys */
767 BUG_ON(handle != virt_to_phys(domain->pgtable));
768
769 spin_lock_init(&domain->lock);
770 spin_lock_init(&domain->pgtablelock);
771 INIT_LIST_HEAD(&domain->clients);
772
773 domain->domain.geometry.aperture_start = 0;
774 domain->domain.geometry.aperture_end = ~0UL;
775 domain->domain.geometry.force_aperture = true;
776
777 return &domain->domain;
778
779 err_counter:
780 free_pages((unsigned long)domain->pgtable, 2);
781 err_dma_cookie:
782 if (type == IOMMU_DOMAIN_DMA)
783 iommu_put_dma_cookie(&domain->domain);
784 err_pgtable:
785 kfree(domain);
786 return NULL;
787 }
788
789 static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
790 {
791 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
792 struct sysmmu_drvdata *data, *next;
793 unsigned long flags;
794 int i;
795
796 WARN_ON(!list_empty(&domain->clients));
797
798 spin_lock_irqsave(&domain->lock, flags);
799
800 list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
801 if (__sysmmu_disable(data))
802 data->master = NULL;
803 list_del_init(&data->domain_node);
804 }
805
806 spin_unlock_irqrestore(&domain->lock, flags);
807
808 if (iommu_domain->type == IOMMU_DOMAIN_DMA)
809 iommu_put_dma_cookie(iommu_domain);
810
811 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
812 DMA_TO_DEVICE);
813
814 for (i = 0; i < NUM_LV1ENTRIES; i++)
815 if (lv1ent_page(domain->pgtable + i)) {
816 phys_addr_t base = lv2table_base(domain->pgtable + i);
817
818 dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
819 DMA_TO_DEVICE);
820 kmem_cache_free(lv2table_kmem_cache,
821 phys_to_virt(base));
822 }
823
824 free_pages((unsigned long)domain->pgtable, 2);
825 free_pages((unsigned long)domain->lv2entcnt, 1);
826 kfree(domain);
827 }
828
829 static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
830 struct device *dev)
831 {
832 struct exynos_iommu_owner *owner = dev->archdata.iommu;
833 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
834 phys_addr_t pagetable = virt_to_phys(domain->pgtable);
835 struct sysmmu_drvdata *data, *next;
836 unsigned long flags;
837 bool found = false;
838
839 if (!has_sysmmu(dev) || owner->domain != iommu_domain)
840 return;
841
842 spin_lock_irqsave(&domain->lock, flags);
843 list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
844 if (data->master == dev) {
845 if (__sysmmu_disable(data)) {
846 data->master = NULL;
847 list_del_init(&data->domain_node);
848 }
849 pm_runtime_put(data->sysmmu);
850 found = true;
851 }
852 }
853 spin_unlock_irqrestore(&domain->lock, flags);
854
855 owner->domain = NULL;
856
857 if (found)
858 dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
859 __func__, &pagetable);
860 else
861 dev_err(dev, "%s: No IOMMU is attached\n", __func__);
862 }
863
864 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
865 struct device *dev)
866 {
867 struct exynos_iommu_owner *owner = dev->archdata.iommu;
868 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
869 struct sysmmu_drvdata *data;
870 phys_addr_t pagetable = virt_to_phys(domain->pgtable);
871 unsigned long flags;
872 int ret = -ENODEV;
873
874 if (!has_sysmmu(dev))
875 return -ENODEV;
876
877 if (owner->domain)
878 exynos_iommu_detach_device(owner->domain, dev);
879
880 list_for_each_entry(data, &owner->controllers, owner_node) {
881 pm_runtime_get_sync(data->sysmmu);
882 ret = __sysmmu_enable(data, pagetable, domain);
883 if (ret >= 0) {
884 data->master = dev;
885
886 spin_lock_irqsave(&domain->lock, flags);
887 list_add_tail(&data->domain_node, &domain->clients);
888 spin_unlock_irqrestore(&domain->lock, flags);
889 }
890 }
891
892 if (ret < 0) {
893 dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
894 __func__, &pagetable);
895 return ret;
896 }
897
898 owner->domain = iommu_domain;
899 dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
900 __func__, &pagetable, (ret == 0) ? "" : ", again");
901
902 return ret;
903 }
904
905 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
906 sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
907 {
908 if (lv1ent_section(sent)) {
909 WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
910 return ERR_PTR(-EADDRINUSE);
911 }
912
913 if (lv1ent_fault(sent)) {
914 sysmmu_pte_t *pent;
915 bool need_flush_flpd_cache = lv1ent_zero(sent);
916
917 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
918 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
919 if (!pent)
920 return ERR_PTR(-ENOMEM);
921
922 update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
923 kmemleak_ignore(pent);
924 *pgcounter = NUM_LV2ENTRIES;
925 dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
926
927 /*
928 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
929 * FLPD cache may cache the address of zero_l2_table. This
930 * function replaces the zero_l2_table with new L2 page table
931 * to write valid mappings.
932 * Accessing the valid area may cause page fault since FLPD
933 * cache may still cache zero_l2_table for the valid area
934 * instead of new L2 page table that has the mapping
935 * information of the valid area.
936 * Thus any replacement of zero_l2_table with other valid L2
937 * page table must involve FLPD cache invalidation for System
938 * MMU v3.3.
939 * FLPD cache invalidation is performed with TLB invalidation
940 * by VPN without blocking. It is safe to invalidate TLB without
941 * blocking because the target address of TLB invalidation is
942 * not currently mapped.
943 */
944 if (need_flush_flpd_cache) {
945 struct sysmmu_drvdata *data;
946
947 spin_lock(&domain->lock);
948 list_for_each_entry(data, &domain->clients, domain_node)
949 sysmmu_tlb_invalidate_flpdcache(data, iova);
950 spin_unlock(&domain->lock);
951 }
952 }
953
954 return page_entry(sent, iova);
955 }
956
957 static int lv1set_section(struct exynos_iommu_domain *domain,
958 sysmmu_pte_t *sent, sysmmu_iova_t iova,
959 phys_addr_t paddr, short *pgcnt)
960 {
961 if (lv1ent_section(sent)) {
962 WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
963 iova);
964 return -EADDRINUSE;
965 }
966
967 if (lv1ent_page(sent)) {
968 if (*pgcnt != NUM_LV2ENTRIES) {
969 WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
970 iova);
971 return -EADDRINUSE;
972 }
973
974 kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
975 *pgcnt = 0;
976 }
977
978 update_pte(sent, mk_lv1ent_sect(paddr));
979
980 spin_lock(&domain->lock);
981 if (lv1ent_page_zero(sent)) {
982 struct sysmmu_drvdata *data;
983 /*
984 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
985 * entry by speculative prefetch of SLPD which has no mapping.
986 */
987 list_for_each_entry(data, &domain->clients, domain_node)
988 sysmmu_tlb_invalidate_flpdcache(data, iova);
989 }
990 spin_unlock(&domain->lock);
991
992 return 0;
993 }
994
995 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
996 short *pgcnt)
997 {
998 if (size == SPAGE_SIZE) {
999 if (WARN_ON(!lv2ent_fault(pent)))
1000 return -EADDRINUSE;
1001
1002 update_pte(pent, mk_lv2ent_spage(paddr));
1003 *pgcnt -= 1;
1004 } else { /* size == LPAGE_SIZE */
1005 int i;
1006 dma_addr_t pent_base = virt_to_phys(pent);
1007
1008 dma_sync_single_for_cpu(dma_dev, pent_base,
1009 sizeof(*pent) * SPAGES_PER_LPAGE,
1010 DMA_TO_DEVICE);
1011 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
1012 if (WARN_ON(!lv2ent_fault(pent))) {
1013 if (i > 0)
1014 memset(pent - i, 0, sizeof(*pent) * i);
1015 return -EADDRINUSE;
1016 }
1017
1018 *pent = mk_lv2ent_lpage(paddr);
1019 }
1020 dma_sync_single_for_device(dma_dev, pent_base,
1021 sizeof(*pent) * SPAGES_PER_LPAGE,
1022 DMA_TO_DEVICE);
1023 *pgcnt -= SPAGES_PER_LPAGE;
1024 }
1025
1026 return 0;
1027 }
1028
1029 /*
1030 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1031 *
1032 * System MMU v3.x has advanced logic to improve address translation
1033 * performance with caching more page table entries by a page table walk.
1034 * However, the logic has a bug that while caching faulty page table entries,
1035 * System MMU reports page fault if the cached fault entry is hit even though
1036 * the fault entry is updated to a valid entry after the entry is cached.
1037 * To prevent caching faulty page table entries which may be updated to valid
1038 * entries later, the virtual memory manager should care about the workaround
1039 * for the problem. The following describes the workaround.
1040 *
1041 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
1042 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
1043 *
1044 * Precisely, any start address of I/O virtual region must be aligned with
1045 * the following sizes for System MMU v3.1 and v3.2.
1046 * System MMU v3.1: 128KiB
1047 * System MMU v3.2: 256KiB
1048 *
1049 * Because System MMU v3.3 caches page table entries more aggressively, it needs
1050 * more workarounds.
1051 * - Any two consecutive I/O virtual regions must have a hole of size larger
1052 * than or equal to 128KiB.
1053 * - Start address of an I/O virtual region must be aligned by 128KiB.
1054 */
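/*
 * Illustrative sketch (not part of this driver): an IOVA allocator honouring
 * the v3.3 constraints above could round every region start up to 128KiB and
 * keep at least a 128KiB hole before the next region, e.g.:
 *
 *	start = ALIGN(prev_end + SZ_128K, SZ_128K);
 *
 * where prev_end is the end of the previously allocated region and ALIGN()/
 * SZ_128K come from the generic kernel headers.
 */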
1055 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
1056 unsigned long l_iova, phys_addr_t paddr, size_t size,
1057 int prot)
1058 {
1059 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1060 sysmmu_pte_t *entry;
1061 sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1062 unsigned long flags;
1063 int ret = -ENOMEM;
1064
1065 BUG_ON(domain->pgtable == NULL);
1066
1067 spin_lock_irqsave(&domain->pgtablelock, flags);
1068
1069 entry = section_entry(domain->pgtable, iova);
1070
1071 if (size == SECT_SIZE) {
1072 ret = lv1set_section(domain, entry, iova, paddr,
1073 &domain->lv2entcnt[lv1ent_offset(iova)]);
1074 } else {
1075 sysmmu_pte_t *pent;
1076
1077 pent = alloc_lv2entry(domain, entry, iova,
1078 &domain->lv2entcnt[lv1ent_offset(iova)]);
1079
1080 if (IS_ERR(pent))
1081 ret = PTR_ERR(pent);
1082 else
1083 ret = lv2set_page(pent, paddr, size,
1084 &domain->lv2entcnt[lv1ent_offset(iova)]);
1085 }
1086
1087 if (ret)
1088 pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
1089 __func__, ret, size, iova);
1090
1091 spin_unlock_irqrestore(&domain->pgtablelock, flags);
1092
1093 return ret;
1094 }
1095
1096 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
1097 sysmmu_iova_t iova, size_t size)
1098 {
1099 struct sysmmu_drvdata *data;
1100 unsigned long flags;
1101
1102 spin_lock_irqsave(&domain->lock, flags);
1103
1104 list_for_each_entry(data, &domain->clients, domain_node)
1105 sysmmu_tlb_invalidate_entry(data, iova, size);
1106
1107 spin_unlock_irqrestore(&domain->lock, flags);
1108 }
1109
1110 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
1111 unsigned long l_iova, size_t size)
1112 {
1113 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1114 sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1115 sysmmu_pte_t *ent;
1116 size_t err_pgsize;
1117 unsigned long flags;
1118
1119 BUG_ON(domain->pgtable == NULL);
1120
1121 spin_lock_irqsave(&domain->pgtablelock, flags);
1122
1123 ent = section_entry(domain->pgtable, iova);
1124
1125 if (lv1ent_section(ent)) {
1126 if (WARN_ON(size < SECT_SIZE)) {
1127 err_pgsize = SECT_SIZE;
1128 goto err;
1129 }
1130
1131 /* workaround for h/w bug in System MMU v3.3 */
1132 update_pte(ent, ZERO_LV2LINK);
1133 size = SECT_SIZE;
1134 goto done;
1135 }
1136
1137 if (unlikely(lv1ent_fault(ent))) {
1138 if (size > SECT_SIZE)
1139 size = SECT_SIZE;
1140 goto done;
1141 }
1142
1143 /* lv1ent_page(sent) == true here */
1144
1145 ent = page_entry(ent, iova);
1146
1147 if (unlikely(lv2ent_fault(ent))) {
1148 size = SPAGE_SIZE;
1149 goto done;
1150 }
1151
1152 if (lv2ent_small(ent)) {
1153 update_pte(ent, 0);
1154 size = SPAGE_SIZE;
1155 domain->lv2entcnt[lv1ent_offset(iova)] += 1;
1156 goto done;
1157 }
1158
1159 /* lv2ent_large(ent) == true here */
1160 if (WARN_ON(size < LPAGE_SIZE)) {
1161 err_pgsize = LPAGE_SIZE;
1162 goto err;
1163 }
1164
1165 dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
1166 sizeof(*ent) * SPAGES_PER_LPAGE,
1167 DMA_TO_DEVICE);
1168 memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1169 dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
1170 sizeof(*ent) * SPAGES_PER_LPAGE,
1171 DMA_TO_DEVICE);
1172 size = LPAGE_SIZE;
1173 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1174 done:
1175 spin_unlock_irqrestore(&domain->pgtablelock, flags);
1176
1177 exynos_iommu_tlb_invalidate_entry(domain, iova, size);
1178
1179 return size;
1180 err:
1181 spin_unlock_irqrestore(&domain->pgtablelock, flags);
1182
1183 pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
1184 __func__, size, iova, err_pgsize);
1185
1186 return 0;
1187 }
1188
1189 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1190 dma_addr_t iova)
1191 {
1192 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1193 sysmmu_pte_t *entry;
1194 unsigned long flags;
1195 phys_addr_t phys = 0;
1196
1197 spin_lock_irqsave(&domain->pgtablelock, flags);
1198
1199 entry = section_entry(domain->pgtable, iova);
1200
1201 if (lv1ent_section(entry)) {
1202 phys = section_phys(entry) + section_offs(iova);
1203 } else if (lv1ent_page(entry)) {
1204 entry = page_entry(entry, iova);
1205
1206 if (lv2ent_large(entry))
1207 phys = lpage_phys(entry) + lpage_offs(iova);
1208 else if (lv2ent_small(entry))
1209 phys = spage_phys(entry) + spage_offs(iova);
1210 }
1211
1212 spin_unlock_irqrestore(&domain->pgtablelock, flags);
1213
1214 return phys;
1215 }
1216
1217 static struct iommu_group *get_device_iommu_group(struct device *dev)
1218 {
1219 struct iommu_group *group;
1220
1221 group = iommu_group_get(dev);
1222 if (!group)
1223 group = iommu_group_alloc();
1224
1225 return group;
1226 }
1227
1228 static int exynos_iommu_add_device(struct device *dev)
1229 {
1230 struct iommu_group *group;
1231
1232 if (!has_sysmmu(dev))
1233 return -ENODEV;
1234
1235 group = iommu_group_get_for_dev(dev);
1236
1237 if (IS_ERR(group))
1238 return PTR_ERR(group);
1239
1240 iommu_group_put(group);
1241
1242 return 0;
1243 }
1244
1245 static void exynos_iommu_remove_device(struct device *dev)
1246 {
1247 if (!has_sysmmu(dev))
1248 return;
1249
1250 iommu_group_remove_device(dev);
1251 }
1252
1253 static int exynos_iommu_of_xlate(struct device *dev,
1254 struct of_phandle_args *spec)
1255 {
1256 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1257 struct platform_device *sysmmu = of_find_device_by_node(spec->np);
1258 struct sysmmu_drvdata *data;
1259
1260 if (!sysmmu)
1261 return -ENODEV;
1262
1263 data = platform_get_drvdata(sysmmu);
1264 if (!data)
1265 return -ENODEV;
1266
1267 if (!owner) {
1268 owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1269 if (!owner)
1270 return -ENOMEM;
1271
1272 INIT_LIST_HEAD(&owner->controllers);
1273 dev->archdata.iommu = owner;
1274 }
1275
1276 list_add_tail(&data->owner_node, &owner->controllers);
1277 return 0;
1278 }
1279
1280 static struct iommu_ops exynos_iommu_ops = {
1281 .domain_alloc = exynos_iommu_domain_alloc,
1282 .domain_free = exynos_iommu_domain_free,
1283 .attach_dev = exynos_iommu_attach_device,
1284 .detach_dev = exynos_iommu_detach_device,
1285 .map = exynos_iommu_map,
1286 .unmap = exynos_iommu_unmap,
1287 .map_sg = default_iommu_map_sg,
1288 .iova_to_phys = exynos_iommu_iova_to_phys,
1289 .device_group = get_device_iommu_group,
1290 .add_device = exynos_iommu_add_device,
1291 .remove_device = exynos_iommu_remove_device,
1292 .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1293 .of_xlate = exynos_iommu_of_xlate,
1294 };
1295
1296 static bool init_done;
1297
1298 static int __init exynos_iommu_init(void)
1299 {
1300 int ret;
1301
1302 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1303 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1304 if (!lv2table_kmem_cache) {
1305 pr_err("%s: Failed to create kmem cache\n", __func__);
1306 return -ENOMEM;
1307 }
1308
1309 ret = platform_driver_register(&exynos_sysmmu_driver);
1310 if (ret) {
1311 pr_err("%s: Failed to register driver\n", __func__);
1312 goto err_reg_driver;
1313 }
1314
1315 zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
1316 if (zero_lv2_table == NULL) {
1317 pr_err("%s: Failed to allocate zero level2 page table\n",
1318 __func__);
1319 ret = -ENOMEM;
1320 goto err_zero_lv2;
1321 }
1322
1323 ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1324 if (ret) {
1325 pr_err("%s: Failed to register exynos-iommu driver.\n",
1326 __func__);
1327 goto err_set_iommu;
1328 }
1329
1330 init_done = true;
1331
1332 return 0;
1333 err_set_iommu:
1334 kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
1335 err_zero_lv2:
1336 platform_driver_unregister(&exynos_sysmmu_driver);
1337 err_reg_driver:
1338 kmem_cache_destroy(lv2table_kmem_cache);
1339 return ret;
1340 }
1341
1342 static int __init exynos_iommu_of_setup(struct device_node *np)
1343 {
1344 struct platform_device *pdev;
1345
1346 if (!init_done)
1347 exynos_iommu_init();
1348
1349 pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
1350 if (IS_ERR(pdev))
1351 return PTR_ERR(pdev);
1352
1353 /*
1354 * use the first registered sysmmu device for performing
1355 * dma mapping operations on iommu page tables (cpu cache flush)
1356 */
1357 if (!dma_dev)
1358 dma_dev = &pdev->dev;
1359
1360 of_iommu_set_ops(np, &exynos_iommu_ops);
1361 return 0;
1362 }
1363
1364 IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
1365 exynos_iommu_of_setup);