/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/dma-mapping.h>	/* dma_map_single() and friends, used below */
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
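
/*
 * E.g. with a 4K granule, pg_shift == 12 and bits_per_level == 9; for a
 * four-level table this yields shifts of 39/30/21/12 for levels 0-3,
 * matching the VMSAv8-64 4K translation regime.
 */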
#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
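
/*
 * E.g. with a 4K granule (8-byte PTEs, bits_per_level == 9) this gives
 * 1G blocks at level 1, 2M blocks at level 2 and 4K pages at level 3.
 */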
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
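
/*
 * E.g. with a 4K granule, pfn_to_iopte() places the physical frame
 * number in descriptor bits [47:12]; iopte_to_pfn() and iopte_deref()
 * mask the attribute bits back out to recover the address.
 */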
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;
static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
{
	return phys_to_dma(dev, virt_to_phys(pages));
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (dev) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests it can't by
		 * giving us back some translation, that bodes very badly...
		 */
		if (dma != __arm_lpae_dma_addr(dev, pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;

	if (dev)
		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;

	*ptep = pte;

	if (dev)
		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
					   sizeof(pte), DMA_TO_DEVICE);
	else if (cfg->tlb->flush_pgtable)
		cfg->tlb->flush_pgtable(ptep, sizeof(pte), cookie);
}
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg, data->iop.cookie);
	return 0;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg, cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
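
/*
 * The recursion above bottoms out as soon as the requested size matches
 * the block size at the current level: e.g. a 2M request with a 4K
 * granule installs a single level-2 block entry rather than 512
 * level-3 page entries.
 */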
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
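
/*
 * E.g. an IOMMU_READ-only request becomes AP_UNPRIV | AP_RDONLY | nG at
 * stage 1, but just HAP_READ (write permission left clear) at stage 2.
 */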
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *cookie = data->iop.cookie;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg, cookie);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}
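
/*
 * In other words, unmapping a single 4K page out of a 2M block entry
 * replaces the block with a level-3 table that remaps the remaining
 * 511 pages, leaving a hole where the unmapped page was.
 */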
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg, cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte,lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
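
/*
 * E.g. with PAGE_SIZE == SZ_4K and a pgsize_bitmap of 4K | 64K | 2M,
 * the 4K granule is chosen and the bitmap is masked against
 * 4K | 2M | 1G, leaving 4K | 2M.
 */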
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
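
/*
 * E.g. ias == 48 with a 4K granule: va_bits = 36, bits_per_level = 9,
 * so levels = 4 and pgd_size = 1 << (9 + 3) = 4K, a full page of
 * top-level entries. A stage-2 table with ias == 40 would instead get a
 * 16-byte pgd at 4 levels, which arm_64_lpae_alloc_pgtable_s2() below
 * expands into two concatenated level-1 pages to shorten the walk to
 * three levels.
 */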
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	if (cfg->tlb->flush_pgtable)
		cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	if (cfg->tlb->flush_pgtable)
		cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
*
808 arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg
*cfg
, void *cookie
)
810 struct io_pgtable
*iop
;
812 if (cfg
->ias
> 32 || cfg
->oas
> 40)
815 cfg
->pgsize_bitmap
&= (SZ_4K
| SZ_2M
| SZ_1G
);
816 iop
= arm_64_lpae_alloc_pgtable_s1(cfg
, cookie
);
818 cfg
->arm_lpae_s1_cfg
.tcr
|= ARM_32_LPAE_TCR_EAE
;
819 cfg
->arm_lpae_s1_cfg
.tcr
&= 0xffffffff;
825 static struct io_pgtable
*
826 arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg
*cfg
, void *cookie
)
828 struct io_pgtable
*iop
;
830 if (cfg
->ias
> 40 || cfg
->oas
> 40)
833 cfg
->pgsize_bitmap
&= (SZ_4K
| SZ_2M
| SZ_1G
);
834 iop
= arm_64_lpae_alloc_pgtable_s2(cfg
, cookie
);
836 cfg
->arm_lpae_s2_cfg
.vtcr
&= 0xffffffff;
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
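
/*
 * IOMMU drivers do not call these init functions directly: the core
 * io-pgtable code looks them up by format when a driver calls
 * alloc_io_pgtable_ops(), as the selftest code below demonstrates.
 */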
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};
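
/*
 * The dummy TLB ops above let the selftests drive map/unmap without any
 * real hardware; each callback only sanity-checks the cookie passed in.
 */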
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif