/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

#ifdef CONFIG_64BIT
#define smmu_writeq	writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif

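/*
 * Note that the !CONFIG_64BIT fallback above issues two 32-bit writes
 * (upper half first), so it is not a single-copy atomic 64-bit store.
 */
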
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

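/*
 * ASIDs and VMIDs are derived directly from the context bank index above:
 * cbndx is unique per SMMU instance, and offsetting the VMID by one means
 * VMID 0 is never handed out to a translation context.
 */
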
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

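/*
 * dev_get_dev_node() above exists because, for PCI masters, the DT
 * "mmu-masters" entry describes the host controller rather than the
 * individual functions, so a PCI device must be resolved to the of_node
 * of its root bus bridge's parent.
 */
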
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

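/*
 * The masters rb-tree is keyed on the raw device_node pointer value,
 * which is stable for the lifetime of the device tree, so lookups and
 * insertions can use plain pointer comparisons.
 */
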
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

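/*
 * The bitmap allocator above is lock-free: find_next_zero_bit() can race
 * with other allocators, so the atomic test_and_set_bit() both claims the
 * index and detects a concurrent winner, in which case the search retries.
 */
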
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}

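/*
 * The iommu_gather_ops below are the callbacks through which the generic
 * io-pgtable code drives TLB maintenance: invalidations queued with
 * tlb_add_flush are only guaranteed to have taken effect once a
 * subsequent tlb_sync completes.
 */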
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR thanks to a VMID16
		 * architectural oversight that affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N               S1
	 *     S1             S1+S2             S1
	 *     S1               S2              S2
	 *     S1               S1              S1
	 *     N                N               N
	 *     N              S1+S2             S2
	 *     N                S2              S2
	 *     N                S1              S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * the context bank itself back to the allocator.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

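/*
 * On stream-matching SMMUs, each stream ID above is matched by an SMR and
 * steered by the corresponding S2CR; on stream-indexing SMMUs, the stream
 * ID indexes the S2CR registers directly. Either way, the S2CR written
 * above points the stream at this domain's context bank.
 */
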
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

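/*
 * The "hard" path above uses the hardware ATS1PR address-translation
 * operation: write the page-aligned VA, poll ATSR.ACTIVE until the walk
 * finishes, then read the resulting physical address (or fault) from PAR.
 */
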
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

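/*
 * Note that DOMAIN_ATTR_NESTING can only be changed while the domain has
 * not yet been attached to an SMMU (init_mutex held, smmu_domain->smmu
 * still NULL); once attached, the stage is fixed.
 */
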
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

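/* Decode an ID register address-size field (IAS/OAS/UBS) into bits */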
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

1565{
1566 unsigned long size;
1567 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1568 u32 id;
bae2c2d4 1569 bool cttw_dt, cttw_reg;
45ae7cff
WD
1570
1571 dev_notice(smmu->dev, "probing hardware configuration...\n");
45ae7cff
WD
1572 dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
1573
1574 /* ID0 */
1575 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
4cf740b0
WD
1576
1577 /* Restrict available stages based on module parameter */
1578 if (force_stage == 1)
1579 id &= ~(ID0_S2TS | ID0_NTS);
1580 else if (force_stage == 2)
1581 id &= ~(ID0_S1TS | ID0_NTS);
1582
45ae7cff
WD
1583 if (id & ID0_S1TS) {
1584 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1585 dev_notice(smmu->dev, "\tstage 1 translation\n");
1586 }
1587
1588 if (id & ID0_S2TS) {
1589 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1590 dev_notice(smmu->dev, "\tstage 2 translation\n");
1591 }
1592
1593 if (id & ID0_NTS) {
1594 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1595 dev_notice(smmu->dev, "\tnested translation\n");
1596 }
1597
1598 if (!(smmu->features &
4cf740b0 1599 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
45ae7cff
WD
1600 dev_err(smmu->dev, "\tno translation support!\n");
1601 return -ENODEV;
1602 }
1603
d38f0ff9 1604 if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
859a732e
MH
1605 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1606 dev_notice(smmu->dev, "\taddress translation ops\n");
1607 }
1608
bae2c2d4
RM
1609 /*
1610 * In order for DMA API calls to work properly, we must defer to what
1611 * the DT says about coherency, regardless of what the hardware claims.
1612 * Fortunately, this also opens up a workaround for systems where the
1613 * ID register value has ended up configured incorrectly.
1614 */
1615 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1616 cttw_reg = !!(id & ID0_CTTW);
1617 if (cttw_dt)
45ae7cff 1618 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
bae2c2d4
RM
1619 if (cttw_dt || cttw_reg)
1620 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1621 cttw_dt ? "" : "non-");
1622 if (cttw_dt != cttw_reg)
1623 dev_notice(smmu->dev,
1624 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
45ae7cff
WD
1625
1626 if (id & ID0_SMS) {
1627 u32 smr, sid, mask;
1628
1629 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1630 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1631 ID0_NUMSMRG_MASK;
1632 if (smmu->num_mapping_groups == 0) {
1633 dev_err(smmu->dev,
1634 "stream-matching supported, but no SMRs present!\n");
1635 return -ENODEV;
1636 }
1637
1638 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1639 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1640 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1641 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1642
1643 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1644 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1645 if ((mask & sid) != sid) {
1646 dev_err(smmu->dev,
1647 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1648 mask, sid);
1649 return -ENODEV;
1650 }
1651
1652 dev_notice(smmu->dev,
1653 "\tstream matching with %u register groups, mask 0x%x",
1654 smmu->num_mapping_groups, mask);
3c8766d0
OH
1655 } else {
1656 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1657 ID0_NUMSIDB_MASK;
45ae7cff
WD
1658 }
1659
1660 /* ID1 */
1661 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
c757e852 1662 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
45ae7cff 1663
c55af7f7 1664 /* Check for size mismatch of SMMU address space from mapped region */
518f7136 1665 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
c757e852 1666 size *= 2 << smmu->pgshift;
c55af7f7 1667 if (smmu->size != size)
2907320d
MH
1668 dev_warn(smmu->dev,
1669 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1670 size, smmu->size);
45ae7cff 1671
518f7136 1672 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
45ae7cff
WD
1673 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1674 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1675 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1676 return -ENODEV;
1677 }
1678 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1679 smmu->num_context_banks, smmu->num_s2_context_banks);
1680
1681 /* ID2 */
1682 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1683 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
518f7136 1684 smmu->ipa_size = size;
45ae7cff 1685
518f7136 1686 /* The output mask is also applied for bypass */
45ae7cff 1687 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
518f7136 1688 smmu->pa_size = size;
45ae7cff 1689
f1d84548
RM
1690 /*
1691 * What the page table walker can address actually depends on which
1692 * descriptor format is in use, but since a) we don't know that yet,
1693 * and b) it can vary per context bank, this will have to do...
1694 */
1695 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1696 dev_warn(smmu->dev,
1697 "failed to set DMA mask for table walker\n");
1698
09360403 1699 if (smmu->version == ARM_SMMU_V1) {
518f7136
WD
1700 smmu->va_size = smmu->ipa_size;
1701 size = SZ_4K | SZ_2M | SZ_1G;
45ae7cff 1702 } else {
45ae7cff 1703 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
518f7136
WD
1704 smmu->va_size = arm_smmu_id_size_to_bits(size);
1705#ifndef CONFIG_64BIT
1706 smmu->va_size = min(32UL, smmu->va_size);
45ae7cff 1707#endif
518f7136
WD
1708 size = 0;
1709 if (id & ID2_PTFS_4K)
1710 size |= SZ_4K | SZ_2M | SZ_1G;
1711 if (id & ID2_PTFS_16K)
1712 size |= SZ_16K | SZ_32M;
1713 if (id & ID2_PTFS_64K)
1714 size |= SZ_64K | SZ_512M;
45ae7cff
WD
1715 }
1716
	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

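	/*
	 * Per the DT binding, the global fault interrupts are listed first;
	 * anything beyond #global-interrupts is a context interrupt.
	 */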
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

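	/*
	 * Walk the "mmu-masters" property, adding each master device and
	 * its stream IDs to the rb-tree for later lookup.
	 */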
	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

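	/*
	 * SMMUv2 reports context faults on dedicated per-context-bank
	 * interrupts, so we expect exactly one IRQ per context bank.
	 */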
	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

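	/* Put the hardware into a known state before first use */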
	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
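	/* Setting sCR0_CLIENTPD makes client transactions bypass translation */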
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = of_match_ptr(arm_smmu_of_match),
	},
	.probe = arm_smmu_device_dt_probe,
	.remove = arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
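	/*
	 * Claim the IOMMU callbacks for each bus type we may sit behind,
	 * but only if no other IOMMU driver has registered there first.
	 */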
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

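/*
 * Register at subsys_initcall time so the SMMU is up before the client
 * devices behind it start probing and issuing DMA.
 */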
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");