iommu/vt-d: Make use of IOMMU sysfs support
drivers/iommu/dmar.c
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA remapping reporting
 * (DMAR) ACPI tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
 *    before the I/O devices managed by that unit.
 * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
 *    after the I/O devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
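/*
 * Illustrative sketch of the locking rules above (not part of the original
 * file): a writer in process context and a lockless reader would look
 * roughly like this; dmar_pci_bus_notifier() and
 * dmar_find_matched_drhd_unit() below follow exactly this pattern.
 *
 *	down_write(&dmar_global_lock);		// process context: mutate
 *	list_add_rcu(&drhd->list, &dmar_drhd_units);
 *	up_write(&dmar_global_lock);
 *
 *	rcu_read_lock();			// interrupt/any context: traverse
 *	for_each_drhd_unit(dmaru) { ... }
 *	rcu_read_unlock();
 */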
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}
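/*
 * Pairing sketch (illustrative, not part of the original file):
 * dmar_alloc_dev_scope() only counts the matching scope entries and
 * allocates the array; callers fill it in and eventually release it with
 * dmar_free_dev_scope(), as dmar_parse_one_drhd() does further down:
 *
 *	dmaru->devices = dmar_alloc_dev_scope(start, end, &dmaru->devices_cnt);
 *	...
 *	dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
 */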
void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		return false;
	if (info->level != count)
		return false;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			return false;
	}

	return true;
}
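/*
 * Worked example (hypothetical topology, illustrative only): for a
 * function at 0000:00:1c.0 sitting directly on the root bus, the notify
 * info built above has info->bus == 0, info->level == 1 and info->path[0]
 * holding device 0x1c, function 0. A DMAR device scope entry matches only
 * if both its starting bus and its complete device/function path agree
 * with that recorded path.
 */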
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			rcu_assign_pointer(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
					    ((void *)drhd) + drhd->header.length,
					    dmaru->segment,
					    dmaru->devices, dmaru->devices_cnt);
		if (ret != 0)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions */
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
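/*
 * Illustrative note (not part of the original file): the notifier above is
 * hooked up in dmar_dev_scope_init() below via
 *
 *	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
 *
 * The INT_MIN priority makes it fire after all other PCI bus notifiers,
 * presumably so the device is otherwise fully set up (or not yet torn
 * down) when the DMAR scope lists are updated.
 */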
/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure, which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
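/*
 * Layout sketch of the ACPI structure parsed above: a DRHD entry is a
 * struct acpi_dmar_hardware_unit header followed, up to header.length, by
 * zero or more variable-length device scope entries; hence the walk from
 * (void *)(drhd + 1) to (void *)drhd + drhd->header.length.
 */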
static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->object_name);

	return 0;
}
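/*
 * Note on the magic 8 in dmar_parse_one_andd() above (assuming the ACPICA
 * layout of struct acpi_dmar_andd): object_name starts 8 bytes into the
 * ANDD structure (4-byte DMAR header, 3 reserved bytes, 1-byte device
 * number), so header->length - 8 is the space available for the name.
 */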
#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_ANDD:
		/*
		 * We don't print this here because we need to sanity-check
		 * it first. So print it in dmar_parse_one_andd() instead.
		 */
		break;
	}
}
/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
					  (struct acpi_table_header **)&dmar_tbl,
					  &dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}
/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;
	int drhd_count = 0;

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been done
	 * via a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	/* haw = dmar->width + 1 must cover at least one page */
	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			drhd_count++;
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
			ret = dmar_parse_one_rmrr(entry_header);
			break;
		case ACPI_DMAR_TYPE_ATSR:
			ret = dmar_parse_one_atsr(entry_header);
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ANDD:
			ret = dmar_parse_one_andd(entry_header);
			break;
		default:
			pr_warn("Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	if (drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
	return ret;
}
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
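/*
 * Usage sketch (illustrative only): one way a caller might map a PCI
 * device to the IOMMU responsible for it, assuming it only needs the
 * intel_iommu handle:
 *
 *	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(pdev);
 *
 *	if (drhd)
 *		iommu = drhd->iommu;
 */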
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->object_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->object_name);
				continue;
			}
			acpi_bus_get_device(h, &adev);
			if (!adev) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->object_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
							  BUS_NOTIFY_ADD_DEVICE);
			if (!info)
				return dmar_dev_scope_status;

			dmar_pci_bus_add_dev(info);
			dmar_free_pci_notify_info(info);
		}

		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
	}

	return dmar_dev_scope_status;
}
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parsing DMAR table failed\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}
static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			pr_warn("Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				pr_warn("IOMMU: can't validate: %llx\n",
					drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
	return 0;
}
int __init detect_intel_iommu(void)
{
	int ret;

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();

	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
	up_write(&dmar_global_lock);

	return ret ? 1 : -ENODEV;
}
static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("IOMMU: can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("IOMMU: can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("IOMMU: can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("IOMMU: can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
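/*
 * Why the two-step mapping in map_iommu() above: the size of the register
 * block is encoded in the capability registers themselves (the IOTLB and
 * fault-record register offsets), and those registers live in the first
 * page. So the code maps one page, reads CAP/ECAP, then remaps if the
 * block turns out to be larger.
 */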
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("IOMMU: failed to map %s\n", iommu->name);
		goto error;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = -1;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/* Reflect status in gcmd */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;

	if (intel_iommu_enabled)
		iommu->iommu_dev = iommu_device_create(NULL, iommu,
						       intel_iommu_groups,
						       iommu->name);

	return 0;

err_unmap:
	unmap_iommu(iommu);
error:
	kfree(iommu);
	return err;
}
998
a868e6b7 999static void free_iommu(struct intel_iommu *iommu)
e61d98d8 1000{
a5459cfe
AW
1001 iommu_device_destroy(iommu->iommu_dev);
1002
a868e6b7
JL
1003 if (iommu->irq) {
1004 free_irq(iommu->irq, iommu);
1005 irq_set_handler_data(iommu->irq, NULL);
a553b142 1006 dmar_free_hwirq(iommu->irq);
a868e6b7 1007 }
e61d98d8 1008
a84da70b
JL
1009 if (iommu->qi) {
1010 free_page((unsigned long)iommu->qi->desc);
1011 kfree(iommu->qi->desc_status);
1012 kfree(iommu->qi);
1013 }
1014
e61d98d8 1015 if (iommu->reg)
6f5cf521
DD
1016 unmap_iommu(iommu);
1017
e61d98d8
SS
1018 kfree(iommu);
1019}
/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
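/*
 * Sketch of the invalidation-queue bookkeeping used below: desc_status[]
 * shadows the QI_LENGTH-entry hardware ring, and each slot travels
 *
 *	QI_FREE -> QI_IN_USE -> QI_DONE (or QI_ABORT) -> QI_FREE
 *
 * free_head is where new descriptors are queued, free_tail is the oldest
 * slot not yet reclaimed, and free_cnt tracks the remaining capacity.
 */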
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			pr_err("VT-d detected invalid descriptor: low=%llx, high=%llx\n",
			       (unsigned long long)qi->desc[index].low,
			       (unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent the
		 * interrupt context from queueing another cmd while a cmd
		 * is already submitted and waiting for completion on this
		 * cpu. This is to avoid a deadlock where the interrupt
		 * context can wait indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
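/*
 * Note on qi_submit_sync() above: each submission consumes two ring slots,
 * the caller's descriptor plus a wait descriptor whose status write
 * (QI_IWD_STATUS_WRITE) lets the CPU poll desc_status[] for completion;
 * hence free_head advances by two and at least three free slots are
 * required before queueing.
 */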
/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}
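/*
 * Usage sketch (illustrative only; the granularity constants are assumed
 * to be those defined in intel-iommu.h): flushing the IOTLB for a whole
 * domain through the queued interface looks roughly like
 *
 *	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 *
 * and dropping a single device's ATS translations like
 *
 *	qi_flush_dev_iotlb(iommu, sid, qdep, addr, mask);
 */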
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}
/* iommu interrupt handling. Most of this is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}
void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
		       fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
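/*
 * Fault record layout assumed by dmar_fault() above: each record is
 * PRIMARY_FAULT_REG_LEN (16) bytes. The low qword holds the faulting page
 * address, bytes 8-11 the source-id, and bytes 12-15 the F bit, fault
 * reason and request type that the handler decodes.
 */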
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq();
	if (irq <= 0) {
		pr_err("IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		dmar_free_hwirq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("IOMMU: can't request irq\n");
	return ret;
}
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}
/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;	/* BIT0: INTR_REMAP */
}
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* DMAR units are in use */
	if (irq_remapping_enabled || intel_iommu_enabled)
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);