Commit | Line | Data |
---|---|---|
5aeecaf4 | 1 | #include <linux/interrupt.h> |
ad3ad3f6 | 2 | #include <linux/dmar.h> |
2ae21010 SS |
3 | #include <linux/spinlock.h> |
4 | #include <linux/jiffies.h> | |
20f3097b | 5 | #include <linux/hpet.h> |
2ae21010 | 6 | #include <linux/pci.h> |
b6fcb33a | 7 | #include <linux/irq.h> |
ad3ad3f6 | 8 | #include <asm/io_apic.h> |
17483a1f | 9 | #include <asm/smp.h> |
6d652ea1 | 10 | #include <asm/cpu.h> |
38717946 | 11 | #include <linux/intel-iommu.h> |
ad3ad3f6 | 12 | #include "intr_remapping.h" |
46f06b72 | 13 | #include <acpi/acpi.h> |
f007e99c WH |
14 | #include <asm/pci-direct.h> |
15 | #include "pci.h" | |
ad3ad3f6 SS |
16 | |
17 | static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; | |
20f3097b SS |
18 | static struct hpet_scope ir_hpet[MAX_HPET_TBS]; |
19 | static int ir_ioapic_num, ir_hpet_num; | |
2ae21010 SS |
20 | int intr_remapping_enabled; |
21 | ||
03ea8155 WH |
22 | static int disable_intremap; |
23 | static __init int setup_nointremap(char *str) | |
24 | { | |
25 | disable_intremap = 1; | |
26 | return 0; | |
27 | } | |
28 | early_param("nointremap", setup_nointremap); | |
29 | ||
5aeecaf4 | 30 | struct irq_2_iommu { |
b6fcb33a SS |
31 | struct intel_iommu *iommu; |
32 | u16 irte_index; | |
33 | u16 sub_handle; | |
34 | u8 irte_mask; | |
5aeecaf4 YL |
35 | }; |
36 | ||
d7e51e66 | 37 | #ifdef CONFIG_GENERIC_HARDIRQS |
85ac16d0 | 38 | static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) |
0b8f1efa YL |
39 | { |
40 | struct irq_2_iommu *iommu; | |
0b8f1efa YL |
41 | |
42 | iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); | |
85ac16d0 | 43 | printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); |
0b8f1efa YL |
44 | |
45 | return iommu; | |
46 | } | |
e420dfb4 YL |
47 | |
48 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | |
49 | { | |
0b8f1efa YL |
50 | struct irq_desc *desc; |
51 | ||
52 | desc = irq_to_desc(irq); | |
53 | ||
54 | if (WARN_ON_ONCE(!desc)) | |
55 | return NULL; | |
56 | ||
57 | return desc->irq_2_iommu; | |
58 | } | |
59 | ||
70590ea7 | 60 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) |
0b8f1efa YL |
61 | { |
62 | struct irq_desc *desc; | |
63 | struct irq_2_iommu *irq_iommu; | |
64 | ||
70590ea7 | 65 | desc = irq_to_desc(irq); |
0b8f1efa YL |
66 | if (!desc) { |
67 | printk(KERN_INFO "can not get irq_desc for %d\n", irq); | |
68 | return NULL; | |
69 | } | |
70 | ||
71 | irq_iommu = desc->irq_2_iommu; | |
72 | ||
73 | if (!irq_iommu) | |
70590ea7 | 74 | desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); |
0b8f1efa YL |
75 | |
76 | return desc->irq_2_iommu; | |
77 | } | |
78 | ||
0b8f1efa YL |
79 | #else /* !CONFIG_SPARSE_IRQ */ |
80 | ||
81 | static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; | |
82 | ||
83 | static struct irq_2_iommu *irq_2_iommu(unsigned int irq) | |
84 | { | |
85 | if (irq < nr_irqs) | |
86 | return &irq_2_iommuX[irq]; | |
87 | ||
88 | return NULL; | |
89 | } | |
e420dfb4 YL |
90 | static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) |
91 | { | |
92 | return irq_2_iommu(irq); | |
93 | } | |
0b8f1efa | 94 | #endif |
b6fcb33a SS |
95 | |
96 | static DEFINE_SPINLOCK(irq_2_ir_lock); | |
97 | ||
e420dfb4 | 98 | static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) |
b6fcb33a | 99 | { |
e420dfb4 YL |
100 | struct irq_2_iommu *irq_iommu; |
101 | ||
102 | irq_iommu = irq_2_iommu(irq); | |
b6fcb33a | 103 | |
e420dfb4 YL |
104 | if (!irq_iommu) |
105 | return NULL; | |
b6fcb33a | 106 | |
e420dfb4 YL |
107 | if (!irq_iommu->iommu) |
108 | return NULL; | |
b6fcb33a | 109 | |
e420dfb4 YL |
110 | return irq_iommu; |
111 | } | |
b6fcb33a | 112 | |
e420dfb4 YL |
113 | int irq_remapped(int irq) |
114 | { | |
115 | return valid_irq_2_iommu(irq) != NULL; | |
b6fcb33a SS |
116 | } |
117 | ||
118 | int get_irte(int irq, struct irte *entry) | |
119 | { | |
120 | int index; | |
e420dfb4 | 121 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 122 | unsigned long flags; |
b6fcb33a | 123 | |
e420dfb4 | 124 | if (!entry) |
b6fcb33a SS |
125 | return -1; |
126 | ||
4c5502b1 | 127 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
e420dfb4 YL |
128 | irq_iommu = valid_irq_2_iommu(irq); |
129 | if (!irq_iommu) { | |
4c5502b1 | 130 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
131 | return -1; |
132 | } | |
133 | ||
e420dfb4 YL |
134 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
135 | *entry = *(irq_iommu->iommu->ir_table->base + index); | |
b6fcb33a | 136 | |
4c5502b1 | 137 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
138 | return 0; |
139 | } | |
140 | ||
141 | int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | |
142 | { | |
143 | struct ir_table *table = iommu->ir_table; | |
e420dfb4 | 144 | struct irq_2_iommu *irq_iommu; |
b6fcb33a SS |
145 | u16 index, start_index; |
146 | unsigned int mask = 0; | |
4c5502b1 | 147 | unsigned long flags; |
b6fcb33a SS |
148 | int i; |
149 | ||
150 | if (!count) | |
151 | return -1; | |
152 | ||
0b8f1efa | 153 | #ifndef CONFIG_SPARSE_IRQ |
e420dfb4 YL |
154 | /* protect irq_2_iommu_alloc later */ |
155 | if (irq >= nr_irqs) | |
156 | return -1; | |
0b8f1efa | 157 | #endif |
e420dfb4 | 158 | |
b6fcb33a SS |
159 | /* |
160 | * start the IRTE search from index 0. | |
161 | */ | |
162 | index = start_index = 0; | |
163 | ||
164 | if (count > 1) { | |
165 | count = __roundup_pow_of_two(count); | |
166 | mask = ilog2(count); | |
167 | } | |
168 | ||
169 | if (mask > ecap_max_handle_mask(iommu->ecap)) { | |
170 | printk(KERN_ERR | |
171 | "Requested mask %x exceeds the max invalidation handle" | |
172 | " mask value %Lx\n", mask, | |
173 | ecap_max_handle_mask(iommu->ecap)); | |
174 | return -1; | |
175 | } | |
176 | ||
4c5502b1 | 177 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
b6fcb33a SS |
178 | do { |
179 | for (i = index; i < index + count; i++) | |
180 | if (table->base[i].present) | |
181 | break; | |
182 | /* empty index found */ | |
183 | if (i == index + count) | |
184 | break; | |
185 | ||
186 | index = (index + count) % INTR_REMAP_TABLE_ENTRIES; | |
187 | ||
188 | if (index == start_index) { | |
4c5502b1 | 189 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
190 | printk(KERN_ERR "can't allocate an IRTE\n"); |
191 | return -1; | |
192 | } | |
193 | } while (1); | |
194 | ||
195 | for (i = index; i < index + count; i++) | |
196 | table->base[i].present = 1; | |
197 | ||
e420dfb4 | 198 | irq_iommu = irq_2_iommu_alloc(irq); |
0b8f1efa | 199 | if (!irq_iommu) { |
4c5502b1 | 200 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
0b8f1efa YL |
201 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); |
202 | return -1; | |
203 | } | |
204 | ||
e420dfb4 YL |
205 | irq_iommu->iommu = iommu; |
206 | irq_iommu->irte_index = index; | |
207 | irq_iommu->sub_handle = 0; | |
208 | irq_iommu->irte_mask = mask; | |
b6fcb33a | 209 | |
4c5502b1 | 210 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
211 | |
212 | return index; | |
213 | } | |
214 | ||
704126ad | 215 | static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) |
b6fcb33a SS |
216 | { |
217 | struct qi_desc desc; | |
218 | ||
219 | desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) | |
220 | | QI_IEC_SELECTIVE; | |
221 | desc.high = 0; | |
222 | ||
704126ad | 223 | return qi_submit_sync(&desc, iommu); |
b6fcb33a SS |
224 | } |
225 | ||
226 | int map_irq_to_irte_handle(int irq, u16 *sub_handle) | |
227 | { | |
228 | int index; | |
e420dfb4 | 229 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 230 | unsigned long flags; |
b6fcb33a | 231 | |
4c5502b1 | 232 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
e420dfb4 YL |
233 | irq_iommu = valid_irq_2_iommu(irq); |
234 | if (!irq_iommu) { | |
4c5502b1 | 235 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
236 | return -1; |
237 | } | |
238 | ||
e420dfb4 YL |
239 | *sub_handle = irq_iommu->sub_handle; |
240 | index = irq_iommu->irte_index; | |
4c5502b1 | 241 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
242 | return index; |
243 | } | |
244 | ||
245 | int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) | |
246 | { | |
e420dfb4 | 247 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 248 | unsigned long flags; |
e420dfb4 | 249 | |
4c5502b1 | 250 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
b6fcb33a | 251 | |
7ddfb650 | 252 | irq_iommu = irq_2_iommu_alloc(irq); |
b6fcb33a | 253 | |
0b8f1efa | 254 | if (!irq_iommu) { |
4c5502b1 | 255 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
0b8f1efa YL |
256 | printk(KERN_ERR "can't allocate irq_2_iommu\n"); |
257 | return -1; | |
258 | } | |
259 | ||
e420dfb4 YL |
260 | irq_iommu->iommu = iommu; |
261 | irq_iommu->irte_index = index; | |
262 | irq_iommu->sub_handle = subhandle; | |
263 | irq_iommu->irte_mask = 0; | |
b6fcb33a | 264 | |
4c5502b1 | 265 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
266 | |
267 | return 0; | |
268 | } | |
269 | ||
270 | int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) | |
271 | { | |
e420dfb4 | 272 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 273 | unsigned long flags; |
e420dfb4 | 274 | |
4c5502b1 | 275 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
e420dfb4 YL |
276 | irq_iommu = valid_irq_2_iommu(irq); |
277 | if (!irq_iommu) { | |
4c5502b1 | 278 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
279 | return -1; |
280 | } | |
281 | ||
e420dfb4 YL |
282 | irq_iommu->iommu = NULL; |
283 | irq_iommu->irte_index = 0; | |
284 | irq_iommu->sub_handle = 0; | |
285 | irq_2_iommu(irq)->irte_mask = 0; | |
b6fcb33a | 286 | |
4c5502b1 | 287 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
288 | |
289 | return 0; | |
290 | } | |
291 | ||
292 | int modify_irte(int irq, struct irte *irte_modified) | |
293 | { | |
704126ad | 294 | int rc; |
b6fcb33a SS |
295 | int index; |
296 | struct irte *irte; | |
297 | struct intel_iommu *iommu; | |
e420dfb4 | 298 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 299 | unsigned long flags; |
b6fcb33a | 300 | |
4c5502b1 | 301 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
e420dfb4 YL |
302 | irq_iommu = valid_irq_2_iommu(irq); |
303 | if (!irq_iommu) { | |
4c5502b1 | 304 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
305 | return -1; |
306 | } | |
307 | ||
e420dfb4 | 308 | iommu = irq_iommu->iommu; |
b6fcb33a | 309 | |
e420dfb4 | 310 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
b6fcb33a SS |
311 | irte = &iommu->ir_table->base[index]; |
312 | ||
c4658b4e WH |
313 | set_64bit((unsigned long *)&irte->low, irte_modified->low); |
314 | set_64bit((unsigned long *)&irte->high, irte_modified->high); | |
b6fcb33a SS |
315 | __iommu_flush_cache(iommu, irte, sizeof(*irte)); |
316 | ||
704126ad | 317 | rc = qi_flush_iec(iommu, index, 0); |
4c5502b1 | 318 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
704126ad YZ |
319 | |
320 | return rc; | |
b6fcb33a SS |
321 | } |
322 | ||
323 | int flush_irte(int irq) | |
324 | { | |
704126ad | 325 | int rc; |
b6fcb33a SS |
326 | int index; |
327 | struct intel_iommu *iommu; | |
e420dfb4 | 328 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 329 | unsigned long flags; |
b6fcb33a | 330 | |
4c5502b1 | 331 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
e420dfb4 YL |
332 | irq_iommu = valid_irq_2_iommu(irq); |
333 | if (!irq_iommu) { | |
4c5502b1 | 334 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
335 | return -1; |
336 | } | |
337 | ||
e420dfb4 | 338 | iommu = irq_iommu->iommu; |
b6fcb33a | 339 | |
e420dfb4 | 340 | index = irq_iommu->irte_index + irq_iommu->sub_handle; |
b6fcb33a | 341 | |
704126ad | 342 | rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); |
4c5502b1 | 343 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a | 344 | |
704126ad | 345 | return rc; |
b6fcb33a SS |
346 | } |
347 | ||
20f3097b SS |
348 | struct intel_iommu *map_hpet_to_ir(u8 hpet_id) |
349 | { | |
350 | int i; | |
351 | ||
352 | for (i = 0; i < MAX_HPET_TBS; i++) | |
353 | if (ir_hpet[i].id == hpet_id) | |
354 | return ir_hpet[i].iommu; | |
355 | return NULL; | |
356 | } | |
357 | ||
89027d35 SS |
358 | struct intel_iommu *map_ioapic_to_ir(int apic) |
359 | { | |
360 | int i; | |
361 | ||
362 | for (i = 0; i < MAX_IO_APICS; i++) | |
363 | if (ir_ioapic[i].id == apic) | |
364 | return ir_ioapic[i].iommu; | |
365 | return NULL; | |
366 | } | |
367 | ||
75c46fa6 SS |
368 | struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) |
369 | { | |
370 | struct dmar_drhd_unit *drhd; | |
371 | ||
372 | drhd = dmar_find_matched_drhd_unit(dev); | |
373 | if (!drhd) | |
374 | return NULL; | |
375 | ||
376 | return drhd->iommu; | |
377 | } | |
378 | ||
c4658b4e WH |
379 | static int clear_entries(struct irq_2_iommu *irq_iommu) |
380 | { | |
381 | struct irte *start, *entry, *end; | |
382 | struct intel_iommu *iommu; | |
383 | int index; | |
384 | ||
385 | if (irq_iommu->sub_handle) | |
386 | return 0; | |
387 | ||
388 | iommu = irq_iommu->iommu; | |
389 | index = irq_iommu->irte_index + irq_iommu->sub_handle; | |
390 | ||
391 | start = iommu->ir_table->base + index; | |
392 | end = start + (1 << irq_iommu->irte_mask); | |
393 | ||
394 | for (entry = start; entry < end; entry++) { | |
395 | set_64bit((unsigned long *)&entry->low, 0); | |
396 | set_64bit((unsigned long *)&entry->high, 0); | |
397 | } | |
398 | ||
399 | return qi_flush_iec(iommu, index, irq_iommu->irte_mask); | |
400 | } | |
401 | ||
b6fcb33a SS |
402 | int free_irte(int irq) |
403 | { | |
704126ad | 404 | int rc = 0; |
e420dfb4 | 405 | struct irq_2_iommu *irq_iommu; |
4c5502b1 | 406 | unsigned long flags; |
b6fcb33a | 407 | |
4c5502b1 | 408 | spin_lock_irqsave(&irq_2_ir_lock, flags); |
e420dfb4 YL |
409 | irq_iommu = valid_irq_2_iommu(irq); |
410 | if (!irq_iommu) { | |
4c5502b1 | 411 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a SS |
412 | return -1; |
413 | } | |
414 | ||
c4658b4e | 415 | rc = clear_entries(irq_iommu); |
b6fcb33a | 416 | |
e420dfb4 YL |
417 | irq_iommu->iommu = NULL; |
418 | irq_iommu->irte_index = 0; | |
419 | irq_iommu->sub_handle = 0; | |
420 | irq_iommu->irte_mask = 0; | |
b6fcb33a | 421 | |
4c5502b1 | 422 | spin_unlock_irqrestore(&irq_2_ir_lock, flags); |
b6fcb33a | 423 | |
704126ad | 424 | return rc; |
b6fcb33a SS |
425 | } |
426 | ||
f007e99c WH |
427 | /* |
428 | * source validation type | |
429 | */ | |
430 | #define SVT_NO_VERIFY 0x0 /* no verification is required */ | |
431 | #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ |
432 | #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ | |
433 | ||
434 | /* | |
435 | * source-id qualifier | |
436 | */ | |
437 | #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ | |
438 | #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore | |
439 | * the third least significant bit | |
440 | */ | |
441 | #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore | |
442 | * the second and third least significant bits | |
443 | */ | |
444 | #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore | |
445 | * the three least significant bits |
446 | */ | |
447 | ||
448 | /* | |
449 | * set SVT, SQ and SID fields of irte to verify | |
450 | * source ids of interrupt requests | |
451 | */ | |
452 | static void set_irte_sid(struct irte *irte, unsigned int svt, | |
453 | unsigned int sq, unsigned int sid) | |
454 | { | |
455 | irte->svt = svt; | |
456 | irte->sq = sq; | |
457 | irte->sid = sid; | |
458 | } | |
459 | ||
460 | int set_ioapic_sid(struct irte *irte, int apic) | |
461 | { | |
462 | int i; | |
463 | u16 sid = 0; | |
464 | ||
465 | if (!irte) | |
466 | return -1; | |
467 | ||
468 | for (i = 0; i < MAX_IO_APICS; i++) { | |
469 | if (ir_ioapic[i].id == apic) { | |
470 | sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; | |
471 | break; | |
472 | } | |
473 | } | |
474 | ||
475 | if (sid == 0) { | |
476 | pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); | |
477 | return -1; | |
478 | } | |
479 | ||
480 | set_irte_sid(irte, 1, 0, sid); | |
481 | ||
482 | return 0; | |
483 | } | |
484 | ||
20f3097b SS |
485 | int set_hpet_sid(struct irte *irte, u8 id) |
486 | { | |
487 | int i; | |
488 | u16 sid = 0; | |
489 | ||
490 | if (!irte) | |
491 | return -1; | |
492 | ||
493 | for (i = 0; i < MAX_HPET_TBS; i++) { | |
494 | if (ir_hpet[i].id == id) { | |
495 | sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; | |
496 | break; | |
497 | } | |
498 | } | |
499 | ||
500 | if (sid == 0) { | |
501 | pr_warning("Failed to set source-id of HPET block (%d)\n", id); | |
502 | return -1; | |
503 | } | |
504 | ||
505 | /* | |
506 | * Should really use SQ_ALL_16. Some platforms are broken. | |
507 | * While we figure out the right quirks for these broken platforms, use | |
508 | * SQ_13_IGNORE_3 for now. | |
509 | */ | |
510 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid); | |
511 | ||
512 | return 0; | |
513 | } | |
514 | ||
f007e99c WH |
515 | int set_msi_sid(struct irte *irte, struct pci_dev *dev) |
516 | { | |
517 | struct pci_dev *bridge; | |
518 | ||
519 | if (!irte || !dev) | |
520 | return -1; | |
521 | ||
522 | /* PCIe device or Root Complex integrated PCI device */ | |
5f4d91a1 | 523 | if (pci_is_pcie(dev) || !dev->bus->parent) { |
f007e99c WH |
524 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, |
525 | (dev->bus->number << 8) | dev->devfn); | |
526 | return 0; | |
527 | } | |
528 | ||
529 | bridge = pci_find_upstream_pcie_bridge(dev); | |
530 | if (bridge) { | |
45e829ea | 531 | if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ |
f007e99c WH |
532 | set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, |
533 | (bridge->bus->number << 8) | dev->bus->number); | |
534 | else /* this is a legacy PCI bridge */ | |
535 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | |
536 | (bridge->bus->number << 8) | bridge->devfn); | |
537 | } | |
538 | ||
539 | return 0; | |
540 | } | |
541 | ||
2ae21010 SS |
542 | static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) |
543 | { | |
544 | u64 addr; | |
c416daa9 | 545 | u32 sts; |
2ae21010 SS |
546 | unsigned long flags; |
547 | ||
548 | addr = virt_to_phys((void *)iommu->ir_table->base); | |
549 | ||
550 | spin_lock_irqsave(&iommu->register_lock, flags); | |
551 | ||
552 | dmar_writeq(iommu->reg + DMAR_IRTA_REG, | |
553 | (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); | |
554 | ||
555 | /* Set interrupt-remapping table pointer */ | |
161fde08 | 556 | iommu->gcmd |= DMA_GCMD_SIRTP; |
c416daa9 | 557 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
2ae21010 SS |
558 | |
559 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | |
560 | readl, (sts & DMA_GSTS_IRTPS), sts); | |
561 | spin_unlock_irqrestore(&iommu->register_lock, flags); | |
562 | ||
563 | /* | |
564 | * global invalidation of interrupt entry cache before enabling | |
565 | * interrupt-remapping. | |
566 | */ | |
567 | qi_global_iec(iommu); | |
568 | ||
569 | spin_lock_irqsave(&iommu->register_lock, flags); | |
570 | ||
571 | /* Enable interrupt-remapping */ | |
2ae21010 | 572 | iommu->gcmd |= DMA_GCMD_IRE; |
c416daa9 | 573 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); |
2ae21010 SS |
574 | |
575 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | |
576 | readl, (sts & DMA_GSTS_IRES), sts); | |
577 | ||
578 | spin_unlock_irqrestore(&iommu->register_lock, flags); | |
579 | } | |
580 | ||
581 | ||
582 | static int setup_intr_remapping(struct intel_iommu *iommu, int mode) | |
583 | { | |
584 | struct ir_table *ir_table; | |
585 | struct page *pages; | |
586 | ||
587 | ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), | |
fa4b57cc | 588 | GFP_ATOMIC); |
2ae21010 SS |
589 | |
590 | if (!iommu->ir_table) | |
591 | return -ENOMEM; | |
592 | ||
824cd75b SS |
593 | pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, |
594 | INTR_REMAP_PAGE_ORDER); | |
2ae21010 SS |
595 | |
596 | if (!pages) { | |
597 | printk(KERN_ERR "failed to allocate pages of order %d\n", | |
598 | INTR_REMAP_PAGE_ORDER); | |
599 | kfree(iommu->ir_table); | |
600 | return -ENOMEM; | |
601 | } | |
602 | ||
603 | ir_table->base = page_address(pages); | |
604 | ||
605 | iommu_set_intr_remapping(iommu, mode); | |
606 | return 0; | |
607 | } | |
608 | ||
eba67e5d SS |
609 | /* |
610 | * Disable Interrupt Remapping. | |
611 | */ | |
b24696bc | 612 | static void iommu_disable_intr_remapping(struct intel_iommu *iommu) |
eba67e5d SS |
613 | { |
614 | unsigned long flags; | |
615 | u32 sts; | |
616 | ||
617 | if (!ecap_ir_support(iommu->ecap)) | |
618 | return; | |
619 | ||
b24696bc FY |
620 | /* |
621 | * global invalidation of interrupt entry cache before disabling | |
622 | * interrupt-remapping. | |
623 | */ | |
624 | qi_global_iec(iommu); | |
625 | ||
eba67e5d SS |
626 | spin_lock_irqsave(&iommu->register_lock, flags); |
627 | ||
628 | sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); | |
629 | if (!(sts & DMA_GSTS_IRES)) | |
630 | goto end; | |
631 | ||
632 | iommu->gcmd &= ~DMA_GCMD_IRE; | |
633 | writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); | |
634 | ||
635 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, | |
636 | readl, !(sts & DMA_GSTS_IRES), sts); | |
637 | ||
638 | end: | |
639 | spin_unlock_irqrestore(&iommu->register_lock, flags); | |
640 | } | |
641 | ||
93758238 WH |
642 | int __init intr_remapping_supported(void) |
643 | { | |
644 | struct dmar_drhd_unit *drhd; | |
645 | ||
03ea8155 WH |
646 | if (disable_intremap) |
647 | return 0; | |
648 | ||
074835f0 YS |
649 | if (!dmar_ir_support()) |
650 | return 0; | |
651 | ||
93758238 WH |
652 | for_each_drhd_unit(drhd) { |
653 | struct intel_iommu *iommu = drhd->iommu; | |
654 | ||
655 | if (!ecap_ir_support(iommu->ecap)) | |
656 | return 0; | |
657 | } | |
658 | ||
659 | return 1; | |
660 | } | |
661 | ||
2ae21010 SS |
662 | int __init enable_intr_remapping(int eim) |
663 | { | |
664 | struct dmar_drhd_unit *drhd; | |
665 | int setup = 0; | |
666 | ||
e936d077 YS |
667 | if (parse_ioapics_under_ir() != 1) { |
668 | printk(KERN_INFO "Not enable interrupt remapping\n"); | |
669 | return -1; | |
670 | } | |
671 | ||
1531a6a6 SS |
672 | for_each_drhd_unit(drhd) { |
673 | struct intel_iommu *iommu = drhd->iommu; | |
674 | ||
34aaaa94 HW |
675 | /* |
676 | * If the queued invalidation is already initialized, | |
677 | * shouldn't disable it. | |
678 | */ | |
679 | if (iommu->qi) | |
680 | continue; | |
681 | ||
1531a6a6 SS |
682 | /* |
683 | * Clear previous faults. | |
684 | */ | |
685 | dmar_fault(-1, iommu); | |
686 | ||
687 | /* | |
688 | * Disable intr remapping and queued invalidation, if already | |
689 | * enabled prior to OS handover. | |
690 | */ | |
b24696bc | 691 | iommu_disable_intr_remapping(iommu); |
1531a6a6 SS |
692 | |
693 | dmar_disable_qi(iommu); | |
694 | } | |
695 | ||
2ae21010 SS |
696 | /* |
697 | * check for the Interrupt-remapping support | |
698 | */ | |
699 | for_each_drhd_unit(drhd) { | |
700 | struct intel_iommu *iommu = drhd->iommu; | |
701 | ||
702 | if (!ecap_ir_support(iommu->ecap)) | |
703 | continue; | |
704 | ||
705 | if (eim && !ecap_eim_support(iommu->ecap)) { | |
706 | printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " | |
707 | " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); | |
708 | return -1; | |
709 | } | |
710 | } | |
711 | ||
712 | /* | |
713 | * Enable queued invalidation for all the DRHD's. | |
714 | */ | |
715 | for_each_drhd_unit(drhd) { | |
716 | int ret; | |
717 | struct intel_iommu *iommu = drhd->iommu; | |
718 | ret = dmar_enable_qi(iommu); | |
719 | ||
720 | if (ret) { | |
721 | printk(KERN_ERR "DRHD %Lx: failed to enable queued, " | |
722 | " invalidation, ecap %Lx, ret %d\n", | |
723 | drhd->reg_base_addr, iommu->ecap, ret); | |
724 | return -1; | |
725 | } | |
726 | } | |
727 | ||
728 | /* | |
729 | * Setup Interrupt-remapping for all the DRHD's now. | |
730 | */ | |
731 | for_each_drhd_unit(drhd) { | |
732 | struct intel_iommu *iommu = drhd->iommu; | |
733 | ||
734 | if (!ecap_ir_support(iommu->ecap)) | |
735 | continue; | |
736 | ||
737 | if (setup_intr_remapping(iommu, eim)) | |
738 | goto error; | |
739 | ||
740 | setup = 1; | |
741 | } | |
742 | ||
743 | if (!setup) | |
744 | goto error; | |
745 | ||
746 | intr_remapping_enabled = 1; | |
747 | ||
748 | return 0; | |
749 | ||
750 | error: | |
751 | /* | |
752 | * handle error condition gracefully here! | |
753 | */ | |
754 | return -1; | |
755 | } | |
ad3ad3f6 | 756 | |
20f3097b SS |
757 | static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, |
758 | struct intel_iommu *iommu) | |
759 | { | |
760 | struct acpi_dmar_pci_path *path; | |
761 | u8 bus; | |
762 | int count; | |
763 | ||
764 | bus = scope->bus; | |
765 | path = (struct acpi_dmar_pci_path *)(scope + 1); | |
766 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | |
767 | / sizeof(struct acpi_dmar_pci_path); | |
768 | ||
769 | while (--count > 0) { | |
770 | /* | |
771 | * Access PCI directly because the PCI |
772 | * subsystem isn't initialized yet. | |
773 | */ | |
774 | bus = read_pci_config_byte(bus, path->dev, path->fn, | |
775 | PCI_SECONDARY_BUS); | |
776 | path++; | |
777 | } | |
778 | ir_hpet[ir_hpet_num].bus = bus; | |
779 | ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn); | |
780 | ir_hpet[ir_hpet_num].iommu = iommu; | |
781 | ir_hpet[ir_hpet_num].id = scope->enumeration_id; | |
782 | ir_hpet_num++; | |
783 | } | |
784 | ||
f007e99c WH |
785 | static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, |
786 | struct intel_iommu *iommu) | |
787 | { | |
788 | struct acpi_dmar_pci_path *path; | |
789 | u8 bus; | |
790 | int count; | |
791 | ||
792 | bus = scope->bus; | |
793 | path = (struct acpi_dmar_pci_path *)(scope + 1); | |
794 | count = (scope->length - sizeof(struct acpi_dmar_device_scope)) | |
795 | / sizeof(struct acpi_dmar_pci_path); | |
796 | ||
797 | while (--count > 0) { | |
798 | /* | |
799 | * Access PCI directly because the PCI |
800 | * subsystem isn't initialized yet. | |
801 | */ | |
802 | bus = read_pci_config_byte(bus, path->dev, path->fn, | |
803 | PCI_SECONDARY_BUS); | |
804 | path++; | |
805 | } | |
806 | ||
807 | ir_ioapic[ir_ioapic_num].bus = bus; | |
808 | ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn); | |
809 | ir_ioapic[ir_ioapic_num].iommu = iommu; | |
810 | ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; | |
811 | ir_ioapic_num++; | |
812 | } | |
813 | ||
20f3097b SS |
814 | static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, |
815 | struct intel_iommu *iommu) | |
ad3ad3f6 SS |
816 | { |
817 | struct acpi_dmar_hardware_unit *drhd; | |
818 | struct acpi_dmar_device_scope *scope; | |
819 | void *start, *end; | |
820 | ||
821 | drhd = (struct acpi_dmar_hardware_unit *)header; | |
822 | ||
823 | start = (void *)(drhd + 1); | |
824 | end = ((void *)drhd) + header->length; | |
825 | ||
826 | while (start < end) { | |
827 | scope = start; | |
828 | if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { | |
829 | if (ir_ioapic_num == MAX_IO_APICS) { | |
830 | printk(KERN_WARNING "Exceeded Max IO APICS\n"); | |
831 | return -1; | |
832 | } | |
833 | ||
834 | printk(KERN_INFO "IOAPIC id %d under DRHD base" | |
835 | " 0x%Lx\n", scope->enumeration_id, | |
836 | drhd->address); | |
837 | ||
f007e99c | 838 | ir_parse_one_ioapic_scope(scope, iommu); |
20f3097b SS |
839 | } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { |
840 | if (ir_hpet_num == MAX_HPET_TBS) { | |
841 | printk(KERN_WARNING "Exceeded Max HPET blocks\n"); | |
842 | return -1; | |
843 | } | |
844 | ||
845 | printk(KERN_INFO "HPET id %d under DRHD base" | |
846 | " 0x%Lx\n", scope->enumeration_id, | |
847 | drhd->address); | |
848 | ||
849 | ir_parse_one_hpet_scope(scope, iommu); | |
ad3ad3f6 SS |
850 | } |
851 | start += scope->length; | |
852 | } | |
853 | ||
854 | return 0; | |
855 | } | |
856 | ||
857 | /* | |
858 | * Finds the association between IOAPICs and their Interrupt-remapping |
859 | * hardware unit. | |
860 | */ | |
861 | int __init parse_ioapics_under_ir(void) | |
862 | { | |
863 | struct dmar_drhd_unit *drhd; | |
864 | int ir_supported = 0; | |
865 | ||
866 | for_each_drhd_unit(drhd) { | |
867 | struct intel_iommu *iommu = drhd->iommu; | |
868 | ||
869 | if (ecap_ir_support(iommu->ecap)) { | |
20f3097b | 870 | if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) |
ad3ad3f6 SS |
871 | return -1; |
872 | ||
873 | ir_supported = 1; | |
874 | } | |
875 | } | |
876 | ||
877 | if (ir_supported && ir_ioapic_num != nr_ioapics) { | |
878 | printk(KERN_WARNING | |
879 | "Not all IO-APIC's listed under remapping hardware\n"); | |
880 | return -1; | |
881 | } | |
882 | ||
883 | return ir_supported; | |
884 | } | |
b24696bc FY |
885 | |
886 | void disable_intr_remapping(void) | |
887 | { | |
888 | struct dmar_drhd_unit *drhd; | |
889 | struct intel_iommu *iommu = NULL; | |
890 | ||
891 | /* | |
892 | * Disable Interrupt-remapping for all the DRHD's now. | |
893 | */ | |
894 | for_each_iommu(iommu, drhd) { | |
895 | if (!ecap_ir_support(iommu->ecap)) | |
896 | continue; | |
897 | ||
898 | iommu_disable_intr_remapping(iommu); | |
899 | } | |
900 | } | |
901 | ||
902 | int reenable_intr_remapping(int eim) | |
903 | { | |
904 | struct dmar_drhd_unit *drhd; | |
905 | int setup = 0; | |
906 | struct intel_iommu *iommu = NULL; | |
907 | ||
908 | for_each_iommu(iommu, drhd) | |
909 | if (iommu->qi) | |
910 | dmar_reenable_qi(iommu); | |
911 | ||
912 | /* | |
913 | * Setup Interrupt-remapping for all the DRHD's now. | |
914 | */ | |
915 | for_each_iommu(iommu, drhd) { | |
916 | if (!ecap_ir_support(iommu->ecap)) | |
917 | continue; | |
918 | ||
919 | /* Set up interrupt remapping for iommu.*/ | |
920 | iommu_set_intr_remapping(iommu, eim); | |
921 | setup = 1; | |
922 | } | |
923 | ||
924 | if (!setup) | |
925 | goto error; | |
926 | ||
927 | return 0; | |
928 | ||
929 | error: | |
930 | /* | |
931 | * handle error condition gracefully here! | |
932 | */ | |
933 | return -1; | |
934 | } | |
935 |