/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

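/*
 * Iterate over the first n TLB entries of @obj, reading entry __i into @cr
 * via __iotlb_read_cr(). Walking the TLB moves the victim pointer, so
 * callers that need it preserved save and restore the lock around the loop
 * (see __dump_tlb_entries()).
 */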
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct iommu *iommu_dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

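/*
 * Illustrative sketch (not built): an architecture backend fills a
 * struct iommu_functions with its TLB/pagetable callbacks and registers it
 * at init time. Only fields that this file actually dereferences through
 * arch_iommu are shown; the full structure lives in <plat/iommu.h>, and the
 * omap2_* names are made up for the example.
 */
#if 0
static const struct iommu_functions omap2_iommu_ops = {
	.version	= 2,			/* arbitrary for the sketch */
	.enable		= omap2_iommu_enable,	/* hypothetical callbacks */
	.disable	= omap2_iommu_disable,
	.fault_isr	= omap2_iommu_fault_isr,
	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,
	/* ... remaining callbacks used via arch_iommu-> below ... */
};

static int __init omap2_iommu_init(void)
{
	return install_iommu_arch(&omap2_iommu_ops);
}
#endif
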
/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 * TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

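/*
 * TLB lock register handling. As used below, 'base' is the number of
 * preserved (locked) entries at the bottom of the TLB that replacement must
 * not touch, and 'vict' selects the entry that the next read/load/flush
 * operates on.
 */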
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj:	target iommu
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear the iommu tlb entries covering the given device virtual address range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

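/*
 * Illustrative sketch (not built): a caller that has just torn down the page
 * table entries for a region flushes the matching TLB range; 'obj', 'da' and
 * the 1MB length are assumptions for the example.
 */
#if 0
	flush_iotlb_range(obj, da, da + SZ_1M);
#endif
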
/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}

/**
 * iommu_set_twl - enable/disable table walking logic
 * @obj:	target iommu
 * @on:		enable/disable
 *
 * Function used to enable/disable TWL. If one wants to work
 * exclusively with locked TLB entries and receive notifications
 * for TLB miss then call this function to disable TWL.
 */
void iommu_set_twl(struct iommu *obj, bool on)
{
	clk_enable(obj->clk);
	arch_iommu->set_twl(obj, on);
	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 * @bytes:	size of the output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

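/*
 * Illustrative sketch (not built): a debugfs read handler could snapshot the
 * TLB into a page-sized buffer; 'obj' and 'buf' are assumed to come from the
 * caller.
 */
#if 0
	ssize_t used = dump_tlb_entries(obj, buf, PAGE_SIZE);
#endif
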
int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
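/*
 * The page table mirrors the ARM short-descriptor layout: a first-level
 * table of PTRS_PER_IOPGD entries holds 1MB sections, 16MB supersections
 * (16 identical entries, see iopgd_alloc_super()), or pointers to
 * second-level tables, which in turn hold 4KB small pages or 64KB large
 * pages (16 identical entries, see iopte_alloc_large()). The exact sizes
 * and masks come from <plat/iopgtable.h>.
 */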
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

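/*
 * Illustrative sketch (not built): mapping a single 4KB page at device
 * address 'da' to physical address 'pa'. The page-size flag is the
 * MMU_CAM_PGSZ_4K case handled above; other attribute flags are omitted and
 * 'obj', 'da' and 'pa' are assumed to come from the caller.
 */
#if 0
	struct iotlb_entry e;
	int err;

	iotlb_init_entry(&e, da, pa, MMU_CAM_PGSZ_4K);
	err = iopgtable_store_entry(obj, &e);
#endif
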
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_set_da_range - Set a valid device address range
 * @obj:	target iommu
 * @start:	Start of valid range
 * @end:	End of valid range
 **/
int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
{
	if (!obj)
		return -EFAULT;

	if (end < start || !PAGE_ALIGN(start | end))
		return -EINVAL;

	obj->da_start = start;
	obj->da_end = end;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_da_range);

/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * it wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table
 **/
static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err = -ENOMEM;
	struct iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

int iommu_set_isr(const char *name,
		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
			     void *priv),
		  void *isr_priv)
{
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	spin_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		spin_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	spin_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret, flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %d\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = iopgtable_store_entry(oiommu, &e);
	if (ret) {
		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	size_t ret;

	dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);

	ret = iopgtable_clear_entry(oiommu, da);
	if (ret != bytes) {
		dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
		return -EINVAL;
	}

	return 0;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};

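/*
 * Illustrative sketch (not built): once omap_iommu_ops is registered, a
 * client drives it through the generic IOMMU API of this kernel generation;
 * "isp" is a made-up device name, 'da'/'pa' come from the caller and the
 * prot flags are left at 0. Each call lands in the corresponding
 * omap_iommu_* callback above.
 */
#if 0
	struct iommu_domain *domain = iommu_domain_alloc();
	struct device *dev = omap_find_iommu_device("isp");

	iommu_attach_device(domain, dev);	/* -> omap_iommu_attach_dev() */
	iommu_map(domain, da, pa, 0, 0);	/* order 0: one 4KB page */
#endif
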
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	register_iommu(&omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");