/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <trace/events/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

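/*
 * Illustrative sketch (not part of this header): typical use of the
 * unmanaged-domain API declared further down in this header, as done by
 * callers like VFIO. The device pointer, page, IOVA and bus type below
 * are hypothetical placeholders.
 *
 *	struct iommu_domain *domain;
 *	int ret;
 *
 *	domain = iommu_domain_alloc(&pci_bus_type);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret)
 *		goto out_free;
 *
 *	ret = iommu_map(domain, 0x10000, page_to_phys(page),
 *			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		goto out_detach;
 *
 *	...
 *
 *	iommu_unmap(domain, 0x10000, PAGE_SIZE);
 * out_detach:
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */
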
struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_MAX,
};

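/*
 * Illustrative sketch (not part of this header): querying a domain
 * attribute with iommu_domain_get_attr(), e.g. the addressable aperture,
 * or checking for the FSL_PAMUV1 constraints described above. The domain
 * pointer is assumed to come from iommu_domain_alloc().
 *
 *	struct iommu_domain_geometry geo;
 *	int ret;
 *
 *	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
 *	if (!ret && geo.force_aperture)
 *		pr_info("usable IOVA range: %pad - %pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */
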
/**
 * struct iommu_dm_region - descriptor for a direct mapped memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 */
struct iommu_dm_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
};
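
/*
 * Illustrative sketch (not part of this header): walking the direct-mapped
 * regions an IOMMU driver requires for a device. The device pointer and
 * the list head name are hypothetical.
 *
 *	struct iommu_dm_region *region;
 *	LIST_HEAD(dm_regions);
 *
 *	iommu_get_dm_regions(dev, &dm_regions);
 *	list_for_each_entry(region, &dm_regions, list)
 *		pr_info("direct map %pa + %zu (prot 0x%x)\n",
 *			&region->start, region->length, region->prot);
 *	iommu_put_dm_regions(dev, &dm_regions);
 */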

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
 * to an iommu domain
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find the iommu group id for a device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @of_xlate: add OF master IDs to iommu grouping
 * @pgsize_bitmap: bitmap of supported page sizes
 * @priv: per-instance data private to the iommu driver
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size);
	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
			 struct scatterlist *sg, unsigned int nents, int prot);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	int (*device_group)(struct device *dev, unsigned int *groupid);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of direct mapping requirements for a device */
	void (*get_dm_regions)(struct device *dev, struct list_head *list);
	void (*put_dm_regions)(struct device *dev, struct list_head *list);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
	/* Set the number of windows per domain */
	int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
	/* Get the number of windows per domain */
	u32 (*domain_get_windows)(struct iommu_domain *domain);

#ifdef CONFIG_OF_IOMMU
	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
#endif

	unsigned long pgsize_bitmap;
	void *priv;
};
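
/*
 * Illustrative sketch (not part of this header): the shape of a minimal
 * iommu_ops registration by an IOMMU driver. The my_* callbacks, the bus
 * type and the page sizes are hypothetical; a real driver must also manage
 * its page tables in its map/unmap callbacks.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_iommu_capable,
 *		.domain_alloc	= my_iommu_domain_alloc,
 *		.domain_free	= my_iommu_domain_free,
 *		.attach_dev	= my_iommu_attach_dev,
 *		.detach_dev	= my_iommu_detach_dev,
 *		.map		= my_iommu_map,
 *		.unmap		= my_iommu_unmap,
 *		.map_sg		= default_iommu_map_sg,
 *		.iova_to_phys	= my_iommu_iova_to_phys,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *	};
 *
 *	bus_set_iommu(&platform_bus_type, &my_iommu_ops);
 */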

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */
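
/*
 * Illustrative sketch (not part of this header): listening for the group
 * events above with a notifier block. my_group_notifier() is a
 * hypothetical callback; the notifier data is the affected device.
 *
 *	static int my_group_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_BIND_DRIVER)
 *			dev_info(dev, "about to bind a driver\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notifier,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */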

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
				   struct scatterlist *sg, unsigned int nents,
				   int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
				    iommu_fault_handler_t handler, void *token);

extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
				   const struct attribute_group **groups,
				   const char *fmt, ...);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
/**
 * report_iommu_fault() - report an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
static inline int report_iommu_fault(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
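
/*
 * Illustrative sketch (not part of this header): a fault handler installed
 * with iommu_set_fault_handler() and invoked via report_iommu_fault()
 * above. my_fault_handler() is hypothetical; returning -ENOSYS keeps the
 * IOMMU driver's default behaviour.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx (%s)\n", iova,
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read");
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */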

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return domain->ops->map_sg(domain, iova, sg, nents, prot);
}

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	/* no IOMMU present, nothing was unmapped */
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	/* no IOMMU present, nothing was mapped */
	return 0;
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
					   iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_dm_regions(struct device *dev,
					struct list_head *list)
{
}

static inline void iommu_put_dm_regions(struct device *dev,
					struct list_head *list)
{
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline struct device *iommu_device_create(struct device *parent,
						 void *drvdata,
						 const struct attribute_group **groups,
						 const char *fmt, ...)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_device_destroy(struct device *dev)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

#endif /* CONFIG_IOMMU_API */

#endif /* __LINUX_IOMMU_H */