iommu/io-pgtable-arm: Allow appropriate DMA API use
drivers/iommu/io-pgtable-arm.c

/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
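
/*
 * Worked example (illustrative): with a 4K granule (pg_shift = 12,
 * bits_per_level = 9) and a 4-level walk starting at level 0, the
 * shifts come out as level 3 -> 12, level 2 -> 21, level 1 -> 30 and
 * level 0 -> 39, matching the AArch64 LPAE descriptor layout.
 */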

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
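
/*
 * For example (4K granule, 4 levels): IOVA bits [20:12] index level 3,
 * bits [29:21] index level 2, and so on. ARM_LPAE_PGD_IDX simply widens
 * the top-level index when the pgd spans several concatenated pages.
 */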

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
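
/*
 * With a 4K granule this yields the expected LPAE sizes: 4K pages at
 * level 3, 2M blocks at level 2 and 1G blocks at level 1.
 */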

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)						\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)		\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)						\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)						\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?				\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :		\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)						\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)						\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
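
/*
 * Note: for a 4K granule, iopte_to_pfn() and iopte_deref() extract the
 * output address from bits [47:12] of a descriptor, discarding the
 * attribute bits at either end.
 */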

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};
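
/*
 * Example values: a 48-bit IAS with a 4K granule gives pg_shift = 12,
 * bits_per_level = 9, levels = 4 and a single 4K page for the pgd.
 */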

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
{
	return phys_to_dma(dev, virt_to_phys(pages));
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (dev) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests it can't by
		 * giving us back some translation, that bodes very badly...
		 */
		if (dma != __arm_lpae_dma_addr(dev, pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;

	if (dev)
		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

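/*
 * Publish a PTE update. When cfg->iommu_dev is set, the table walker may
 * not be cache-coherent with the CPU, so the updated entry is pushed out
 * via the DMA API; otherwise we fall back to the TLB ops' flush_pgtable()
 * callback, if one is provided.
 */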
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;

	*ptep = pte;

	if (dev)
		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
					   sizeof(pte), DMA_TO_DEVICE);
	else if (cfg->tlb->flush_pgtable)
		cfg->tlb->flush_pgtable(ptep, sizeof(pte), cookie);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg, data->iop.cookie);
	return 0;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg, cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
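
/*
 * Example: at stage 1, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE above
 * produces an unprivileged, non-global, writeable mapping with the
 * write-back cacheable MAIR index; at stage 2 the same flags produce
 * HAP read/write permissions with the OIWB memory attribute.
 */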

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

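/*
 * Unmapping part of an existing block mapping: carve the block up by
 * installing a next-level table that remaps everything except the region
 * being unmapped. E.g. unmapping 4K out of a 2M block leaves 511 page
 * entries in place behind the new table entry.
 */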
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *cookie = data->iop.cookie;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg, cookie);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg, cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
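	/*
	 * For example, a bitmap of (SZ_4K | SZ_64K | SZ_2M) on a 4K-page
	 * CPU selects the 4K granule and is pared down to (SZ_4K | SZ_2M).
	 */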
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	if (cfg->tlb->flush_pgtable)
		cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
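
	/*
	 * E.g. a 40-bit IPA with a 4K granule would otherwise need a
	 * 4-level walk; concatenating the two level-1 tables into an 8K
	 * pgd lets the walk start at level 1 instead.
	 */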

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	if (cfg->tlb->flush_pgtable)
		cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

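/*
 * A minimal usage sketch (illustrative only, not part of this driver):
 * an IOMMU driver fills in an io_pgtable_cfg and obtains its ops from the
 * generic io-pgtable layer. Here "my_tlb_ops" and "smmu_dev" stand in for
 * driver-provided TLB callbacks and the IOMMU's struct device (setting
 * cfg.iommu_dev is what enables the DMA API maintenance above), and
 * "cookie" is an opaque token passed back to the TLB callbacks:
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= smmu_dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops) {
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *		ops->unmap(ops, iova, SZ_4K);
 *		free_io_pgtable_ops(ops);
 *	}
 */
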
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif