/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- 4k and 64k pages, with contiguous pte hints.
 *	- Up to 42-bit addressing (dependent on VA_BITS)
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include <asm/pgalloc.h>
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
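/*
 * Worked example (illustrative, using the offsets in the comment above):
 * with ARM_SMMU_OPT_SECURE_CFG_ACCESS set, a read via
 * ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR lands on the alias at
 * base + 0x400 + 0x48 = base + 0x448 (nsGFSR); without the option it is
 * the usual base + 0x48.
 */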
/* Page table bits */
#define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)

#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES	16
#elif PAGE_SIZE == SZ_64K
#define ARM_SMMU_PTE_CONT_ENTRIES	32
#else
#define ARM_SMMU_PTE_CONT_ENTRIES	1
#endif

#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
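/*
 * Worked example (illustrative): with 4K CPU pages,
 * ARM_SMMU_PTE_CONT_ENTRIES is 16, so ARM_SMMU_PTE_CONT_SIZE is
 * 16 * 4K = 64K and ARM_SMMU_PTE_CONT_MASK rounds addresses down to a
 * 64K boundary. With 64K pages the contiguous span is 32 * 64K = 2M.
 */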
/* Stage 1 PTE bits */
#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2
#define ARM_SMMU_PTE_nG			(((pteval_t)1) << 11)

/* Stage 2 PTE bits */
#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58
#define ARM_SMMU_GR0_PIDR0		0xfe0
#define ARM_SMMU_GR0_PIDR1		0xfe4
#define ARM_SMMU_GR0_PIDR2		0xfe8

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_PTFS_SHIFT			24
#define ID0_PTFS_MASK			0x2
#define ID0_PTFS_V8_ONLY		0x2
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

#define PIDR2_ARCH_SHIFT		4
#define PIDR2_ARCH_MASK			0xf
/* Global TLB invalidation */
#define ARM_SMMU_GR0_STLBIALL		0x60
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)
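/*
 * Worked example (illustrative, hypothetical layout): for an SMMU mapped
 * at `base` with size 0x10000 and pagesize SZ_4K, ARM_SMMU_CB_BASE(smmu)
 * is base + 0x8000 and context bank 2 starts at
 * ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, 2) = base + 0x8000 + 0x2000.
 */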
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0_LO		0x20
#define ARM_SMMU_CB_TTBR0_HI		0x24
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)
#define TTBCR_EAE			(1 << 31)

#define TTBCR_PASIZE_SHIFT		16
#define TTBCR_PASIZE_MASK		0x7

#define TTBCR_TG0_4K			(0 << 14)
#define TTBCR_TG0_64K			(1 << 14)

#define TTBCR_SH0_SHIFT			12
#define TTBCR_SH0_MASK			0x3
#define TTBCR_SH_NS			0
#define TTBCR_SH_OS			2
#define TTBCR_SH_IS			3

#define TTBCR_ORGN0_SHIFT		10
#define TTBCR_IRGN0_SHIFT		8
#define TTBCR_RGN_MASK			0x3
#define TTBCR_RGN_NC			0
#define TTBCR_RGN_WBWA			1
#define TTBCR_RGN_WT			2
#define TTBCR_RGN_WB			3

#define TTBCR_SL0_SHIFT			6
#define TTBCR_SL0_MASK			0x3
#define TTBCR_SL0_LVL_2			0
#define TTBCR_SL0_LVL_1			1

#define TTBCR_T1SZ_SHIFT		16
#define TTBCR_T0SZ_SHIFT		0
#define TTBCR_SZ_MASK			0xf

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_MASK			0x7

#define TTBCR2_PASIZE_SHIFT		0
#define TTBCR2_PASIZE_MASK		0x7

/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32			0
#define TTBCR2_ADDR_36			1
#define TTBCR2_ADDR_40			2
#define TTBCR2_ADDR_42			3
#define TTBCR2_ADDR_44			4
#define TTBCR2_ADDR_48			5
#define TTBRn_HI_ASID_SHIFT		16

#define MAIR_ATTR_SHIFT(n)		((n) << 3)
#define MAIR_ATTR_MASK			0xff
#define MAIR_ATTR_DEVICE		0x04
#define MAIR_ATTR_NC			0x44
#define MAIR_ATTR_WBRWA			0xff
#define MAIR_ATTR_IDX_NC		0
#define MAIR_ATTR_IDX_CACHE		1
#define MAIR_ATTR_IDX_DEV		2
#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
					 FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pagesize;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	int				version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			input_size;
	unsigned long			s1_output_size;
	unsigned long			s2_output_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	pgd_t				*pgd;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
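/*
 * Example (follows directly from the macros above): a domain on context
 * bank 0 runs with ASID 0 at stage 1 but VMID 1 at stage 2; since cbndx
 * is never negative, VMID 0 is never handed out by this scheme.
 */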
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_cfg		cfg;
	spinlock_t			lock;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device *dev_get_master_dev(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent;
	}

	return dev;
}
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct arm_smmu_device *smmu, struct device *dev)
{
	struct arm_smmu_master *master;

	if (dev_is_pci(dev))
		return dev->archdata.iommu;

	master = find_smmu_master(smmu, dev->of_node);
	return master ? &master->cfg : NULL;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this;
		this = container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i)
		master->cfg.streamids[i] = masterspec->args[i];

	return insert_smmu_master(smmu, master);
}
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_master_dev(dev)->of_node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
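/*
 * Usage sketch (mirrors the callers later in this file): allocate a free
 * context bank index, then release it when the domain is torn down.
 *
 *	int cbndx = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 *					    smmu->num_context_banks);
 *	if (IS_ERR_VALUE(cbndx))
 *		return cbndx;	// all banks in [start, end) are taken
 *	...
 *	__arm_smmu_free_bitmap(smmu->context_map, cbndx);
 */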
/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *base = ARM_SMMU_GR0(smmu);
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	arm_smmu_tlb_sync(smmu);
}
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
				   size_t size)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
		dsb();
	} else {
		/*
		 * If the SMMU can't walk tables in the CPU caches, treat them
		 * like non-coherent DMA since we need to flush the new entries
		 * all the way out to memory. There's no possibility of
		 * recursion here as the SMMU table walker will not be wired
		 * through another SMMU.
		 */
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
	}
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == 1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	if (smmu->version > 1) {
		/* CBA2R */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg,
			       gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));

		/* TTBCR2 */
		switch (smmu->input_size) {
		case 32:
			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
			break;
		case 36:
			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
			break;
		case 40:
			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
			break;
		case 42:
			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
			break;
		case 44:
			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
			break;
		case 48:
			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
			break;
		}

		switch (smmu->s1_output_size) {
		case 32:
			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
			break;
		case 36:
			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
			break;
		case 40:
			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
			break;
		case 42:
			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
			break;
		case 44:
			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
			break;
		case 48:
			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
			break;
		}

		if (stage1)
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
	}

	/* TTBR0 */
	arm_smmu_flush_pgtable(smmu, cfg->pgd,
			       PTRS_PER_PGD * sizeof(pgd_t));
	reg = __pa(cfg->pgd);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
	reg = (phys_addr_t)__pa(cfg->pgd) >> 32;
	if (stage1)
		reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);

	/*
	 * TTBCR
	 * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
	 */
	if (smmu->version > 1) {
		if (PAGE_SIZE == SZ_4K)
			reg = TTBCR_TG0_4K;
		else
			reg = TTBCR_TG0_64K;

		if (!stage1) {
			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;

			switch (smmu->s2_output_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
				break;
			}
		} else {
			reg |= (64 - smmu->input_size) << TTBCR_T0SZ_SHIFT;
		}
	} else {
		reg = 0;
	}

	reg |= TTBCR_EAE |
	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIR0 (stage-1 only) */
	if (stage1) {
		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
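/*
 * Worked example for the TTBCR T0SZ field programmed above (illustrative):
 * a stage-1 context on an SMMU with a 42-bit input size gets
 * T0SZ = 64 - 42 = 22, i.e. the walker resolves a 42-bit IOVA space
 * rooted at TTBR0.
 */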
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	} else {
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		return ret;

	cfg->cbndx = ret;
	if (smmu->version == 1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
		goto out_free_context;
	}

	smmu_domain->smmu = smmu;
	arm_smmu_init_context_bank(smmu_domain);
	return 0;

out_free_context:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/* Disable the context bank and nuke the TLB before freeing it. */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
	arm_smmu_tlb_inv_context(smmu_domain);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;
	pgd_t *pgd;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return -ENOMEM;

	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!pgd)
		goto out_free_domain;
	smmu_domain->cfg.pgd = pgd;

	spin_lock_init(&smmu_domain->lock);
	domain->priv = smmu_domain;
	return 0;

out_free_domain:
	kfree(smmu_domain);
	return -ENOMEM;
}
static void arm_smmu_free_ptes(pmd_t *pmd)
{
	pgtable_t table = pmd_pgtable(*pmd);
	pgtable_page_dtor(table);
	__free_page(table);
}

static void arm_smmu_free_pmds(pud_t *pud)
{
	int i;
	pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);

	pmd = pmd_base;
	for (i = 0; i < PTRS_PER_PMD; ++i, ++pmd) {
		if (pmd_none(*pmd))
			continue;
		arm_smmu_free_ptes(pmd);
	}

	pmd_free(NULL, pmd_base);
}

static void arm_smmu_free_puds(pgd_t *pgd)
{
	int i;
	pud_t *pud, *pud_base = pud_offset(pgd, 0);

	pud = pud_base;
	for (i = 0; i < PTRS_PER_PUD; ++i, ++pud) {
		if (pud_none(*pud))
			continue;
		arm_smmu_free_pmds(pud);
	}

	pud_free(NULL, pud_base);
}
static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
{
	int i;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	pgd_t *pgd, *pgd_base = cfg->pgd;

	/*
	 * Recursively free the page tables for this domain. We don't
	 * care about speculative TLB filling because the tables should
	 * not be active in any context bank at this point (SCTLR.M is 0).
	 */
	pgd = pgd_base;
	for (i = 0; i < PTRS_PER_PGD; ++i, ++pgd) {
		if (pgd_none(*pgd))
			continue;
		arm_smmu_free_puds(pgd);
	}

	kfree(pgd_base);
}
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	arm_smmu_free_pgtables(smmu_domain);
	kfree(smmu_domain);
}
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc(sizeof(*smrs) * cfg->num_streamids, GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
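/*
 * Worked example (illustrative): a master with the single stream ID 0x42
 * that is given SMR index 3 results in the write
 *
 *	writel_relaxed(SMR_VALID | (0x42 << SMR_ID_SHIFT) |
 *		       (0 << SMR_MASK_SHIFT),
 *		       gr0_base + ARM_SMMU_GR0_SMR(3));
 *
 * i.e. an exact-match entry (mask 0, since SMRs are not shared here) at
 * GR0 offset 0x800 + (3 << 2) = 0x80c.
 */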
static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}
static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
					   struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	for (i = 0; i < cfg->num_streamids; ++i) {
		u16 sid = cfg->streamids[i];
		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
	}
}
*smmu_domain
,
1132 struct arm_smmu_master_cfg
*cfg
)
1135 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1136 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1138 ret
= arm_smmu_master_configure_smrs(smmu
, cfg
);
1142 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1144 idx
= cfg
->smrs
? cfg
->smrs
[i
].idx
: cfg
->streamids
[i
];
1145 s2cr
= S2CR_TYPE_TRANS
|
1146 (smmu_domain
->cfg
.cbndx
<< S2CR_CBNDX_SHIFT
);
1147 writel_relaxed(s2cr
, gr0_base
+ ARM_SMMU_GR0_S2CR(idx
));
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	arm_smmu_bypass_stream_mapping(smmu, cfg);
	arm_smmu_master_free_smrs(smmu, cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = -EINVAL;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	unsigned long flags;

	smmu = dev_get_master_dev(dev)->archdata.iommu;
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	spin_lock_irqsave(&smmu_domain->lock, flags);
	if (!smmu_domain->smmu) {
		/* Now that we have a master, we can finalise the domain */
		ret = arm_smmu_init_domain_context(domain, smmu);
		if (IS_ERR_VALUE(ret))
			goto err_unlock;
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		goto err_unlock;
	}
	spin_unlock_irqrestore(&smmu_domain->lock, flags);

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
	if (!cfg)
		return -ENODEV;

	return arm_smmu_domain_add_master(smmu_domain, cfg);

err_unlock:
	spin_unlock_irqrestore(&smmu_domain->lock, flags);
	return ret;
}
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(smmu_domain->smmu, dev);
	if (cfg)
		arm_smmu_domain_remove_master(smmu_domain, cfg);
}
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
					     unsigned long end)
{
	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
}
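/*
 * Worked example (illustrative, 4K pages): with ARM_SMMU_PTE_CONT_SIZE
 * equal to 64K, addr = 0x10000 with end >= 0x20000 qualifies for the
 * contiguous hint (64K-aligned start, at least 64K still to map),
 * whereas addr = 0x11000 fails the alignment test.
 */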
static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long pfn, int prot, int stage)
{
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;

	if (pmd_none(*pmd)) {
		/* Allocate a new set of tables */
		pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
		if (!table)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
		if (!pgtable_page_ctor(table)) {
			__free_page(table);
			return -ENOMEM;
		}
		pmd_populate(NULL, pmd, table);
		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
	}

	if (stage == 1) {
		pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pteval |= ARM_SMMU_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pteval |= (MAIR_ATTR_IDX_CACHE <<
				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
	} else {
		pteval |= ARM_SMMU_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pteval |= ARM_SMMU_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pteval |= ARM_SMMU_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
		else
			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
	}

	/* If no access, create a faulting entry to avoid TLB fills */
	if (prot & IOMMU_EXEC)
		pteval &= ~ARM_SMMU_PTE_XN;
	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;

	pteval |= ARM_SMMU_PTE_SH_IS;
	start = pmd_page_vaddr(*pmd) + pte_index(addr);
	pte = start;

	/*
	 * Install the page table entries. This is fairly complicated
	 * since we attempt to make use of the contiguous hint in the
	 * ptes where possible. The contiguous hint indicates a series
	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
	 * contiguous region with the following constraints:
	 *
	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
	 *   - Each pte in the region has the contiguous hint bit set
	 *
	 * This complicates unmapping (also handled by this code, when
	 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
	 * possible, yet highly unlikely, that a client may unmap only
	 * part of a contiguous range. This requires clearing of the
	 * contiguous hint bits in the range before installing the new
	 * faulting entries.
	 *
	 * Note that re-mapping an address range without first unmapping
	 * it is not supported, so TLB invalidation is not required here
	 * and is instead performed at unmap and domain-init time.
	 */
	do {
		int i = 1;
		pteval &= ~ARM_SMMU_PTE_CONT;

		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
			i = ARM_SMMU_PTE_CONT_ENTRIES;
			pteval |= ARM_SMMU_PTE_CONT;
		} else if (pte_val(*pte) &
			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
			int j;
			pte_t *cont_start;
			unsigned long idx = pte_index(addr);

			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
			cont_start = pmd_page_vaddr(*pmd) + idx;
			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;

			arm_smmu_flush_pgtable(smmu, cont_start,
					       sizeof(*pte) *
					       ARM_SMMU_PTE_CONT_ENTRIES);
		}

		do {
			*pte = pfn_pte(pfn, __pgprot(pteval));
		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
	} while (addr != end);

	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
	return 0;
}
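/*
 * Worked example for the CONT-splitting path above (illustrative,
 * 4K pages): if a client unmaps a single 4K page inside a 64K run whose
 * 16 ptes carry ARM_SMMU_PTE_CONT, the code first clears the hint bit on
 * all 16 entries (and flushes them) before writing the faulting entry,
 * so the TLB can never hold a contiguous mapping that disagrees with
 * the tables.
 */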
static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int prot, int stage)
{
	int ret;
	pmd_t *pmd;
	unsigned long next, pfn = __phys_to_pfn(phys);

#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_none(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
		pud_populate(NULL, pud, pmd);
		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));

		pmd += pmd_index(addr);
	} else
#endif
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
					      prot, stage);
		phys += next - addr;
		/* Keep pfn in step with phys across pmd boundaries */
		pfn = __phys_to_pfn(phys);
	} while (pmd++, addr = next, addr < end);

	return ret;
}
static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int prot, int stage)
{
	int ret = 0;
	unsigned long next;
	pud_t *pud;

#ifndef __PAGETABLE_PUD_FOLDED
	if (pgd_none(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
		pgd_populate(NULL, pgd, pud);
		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));

		pud += pud_index(addr);
	} else
#endif
		pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
					      prot, stage);
		phys += next - addr;
	} while (pud++, addr = next, addr < end);

	return ret;
}
static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	int ret, stage;
	unsigned long end;
	phys_addr_t input_mask, output_mask;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	pgd_t *pgd = cfg->pgd;
	unsigned long flags;

	if (cfg->cbar == CBAR_TYPE_S2_TRANS) {
		stage = 2;
		output_mask = (1ULL << smmu->s2_output_size) - 1;
	} else {
		stage = 1;
		output_mask = (1ULL << smmu->s1_output_size) - 1;
	}

	if (!pgd)
		return -EINVAL;

	if (size & ~PAGE_MASK)
		return -EINVAL;

	input_mask = (1ULL << smmu->input_size) - 1;
	if ((phys_addr_t)iova & ~input_mask)
		return -ERANGE;

	if (paddr & ~output_mask)
		return -ERANGE;

	spin_lock_irqsave(&smmu_domain->lock, flags);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
		unsigned long next = pgd_addr_end(iova, end);

		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
					      prot, stage);
		if (ret)
			goto out_unlock;

		paddr += next - iova;
		iova = next;
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock_irqrestore(&smmu_domain->lock, flags);

	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (!smmu_domain)
		return -ENODEV;

	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
	arm_smmu_tlb_inv_context(smmu_domain);
	return ret ? 0 : size;
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	pgd_t *pgdp, pgd;
	pud_t pud;
	pmd_t pmd;
	pte_t pte;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	pgdp = cfg->pgd;
	if (!pgdp)
		return 0;

	pgd = *(pgdp + pgd_index(iova));
	if (pgd_none(pgd))
		return 0;

	pud = *pud_offset(&pgd, iova);
	if (pud_none(pud))
		return 0;

	pmd = *pmd_offset(&pud, iova);
	if (pmd_none(pmd))
		return 0;

	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
	if (pte_none(pte))
		return 0;

	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
}
static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	u32 features = smmu ? smmu->features : 0;

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return features & ARM_SMMU_FEAT_COHERENT_WALK;
	case IOMMU_CAP_INTR_REMAP:
		return 1; /* MSIs are just memory writes */
	default:
		return 0;
	}
}
*pdev
, u16 alias
, void *data
)
1527 *((u16
*)data
) = alias
;
1528 return 0; /* Continue walking */
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct iommu_group *group;
	int ret;

	if (dev->archdata.iommu) {
		dev_warn(dev, "IOMMU driver already assigned to device\n");
		return -EINVAL;
	}

	smmu = find_smmu_for_device(dev);
	if (!smmu)
		return -ENODEV;

	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		return PTR_ERR(group);
	}

	if (dev_is_pci(dev)) {
		struct arm_smmu_master_cfg *cfg;
		struct pci_dev *pdev = to_pci_dev(dev);

		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg) {
			ret = -ENOMEM;
			goto out_put_group;
		}

		cfg->num_streamids = 1;
		/*
		 * Assume Stream ID == Requester ID for now.
		 * We need a way to describe the ID mappings in FDT.
		 */
		pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
				       &cfg->streamids[0]);
		dev->archdata.iommu = cfg;
	} else {
		dev->archdata.iommu = smmu;
	}

	ret = iommu_group_add_device(group, dev);

out_put_group:
	iommu_group_put(group);
	return ret;
}
*dev
)
1583 if (dev_is_pci(dev
))
1584 kfree(dev
->archdata
.iommu
);
1586 dev
->archdata
.iommu
= NULL
;
1587 iommu_group_remove_device(dev
);
static struct iommu_ops arm_smmu_ops = {
	.domain_init	= arm_smmu_domain_init,
	.domain_destroy	= arm_smmu_domain_destroy,
	.attach_dev	= arm_smmu_attach_dev,
	.detach_dev	= arm_smmu_detach_dev,
	.map		= arm_smmu_map,
	.unmap		= arm_smmu_unmap,
	.iova_to_phys	= arm_smmu_iova_to_phys,
	.domain_has_cap	= arm_smmu_domain_has_cap,
	.add_device	= arm_smmu_add_device,
	.remove_device	= arm_smmu_remove_device,
	.pgsize_bitmap	= (SECTION_SIZE |
			   ARM_SMMU_PTE_CONT_SIZE |
			   PAGE_SIZE),
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
{
	/* Decode the 3-bit IAS/OAS/UBS encodings used by the ID registers */
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;

	dev_notice(smmu->dev, "probing hardware configuration...\n");

	/* Primecell ID */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
#ifndef CONFIG_64BIT
	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
		return -ENODEV;
	}
#endif
	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
		 ARM_SMMU_FEAT_TRANS_NESTED))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if (id & ID0_CTTW) {
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
		dev_notice(smmu->dev, "\tcoherent table walk\n");
	}

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	}
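	/*
	 * Worked example for the probe above (illustrative): after writing
	 * all-ones to SMR0, a read-back of sid = 0x7fff means every ID bit
	 * is implemented, so (mask & sid) == sid holds; if the mask field
	 * read back as, say, 0x00ff on hardware using 15-bit stream IDs,
	 * the check would trip the error path.
	 */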
	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= (smmu->pagesize << 1);
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);
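	/*
	 * Worked example for the size check above (illustrative): a 4K-page
	 * SMMU (pagesize = 0x1000) reporting NUMPAGENDXB = 1 implies
	 * 1 << (1 + 1) = 4 page pairs, i.e. 4 * (0x1000 << 1) = 0x8000 bytes
	 * of register space, which is compared against the mapped "reg" size.
	 */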
	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
				     ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);

	/*
	 * Stage-1 output limited by stage-2 input size due to pgd
	 * allocation (PTRS_PER_PGD).
	 */
#ifdef CONFIG_64BIT
	smmu->s1_output_size = min((unsigned long)VA_BITS, size);
#else
	smmu->s1_output_size = min(32UL, size);
#endif

	/* The stage-2 output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);

	if (smmu->version == 1) {
		smmu->input_size = 32;
	} else {
#ifdef CONFIG_64BIT
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
#else
		size = 32;
#endif
		smmu->input_size = size;

		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
				PAGE_SIZE);
			return -ENODEV;
		}
	}

	dev_notice(smmu->dev,
		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
		   smmu->input_size, smmu->s1_output_size,
		   smmu->s2_output_size);
	return 0;
}
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_put_masters;

	parse_driver_options(smmu);

	if (smmu->version > 1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
#ifdef CONFIG_OF
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", },
	{ .compatible = "arm,smmu-v2", },
	{ .compatible = "arm,mmu-400", },
	{ .compatible = "arm,mmu-500", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#endif
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	int ret;

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");