[PATCH] genirq: x86_64 irq: Reenable migrating irqs to other cpus
[deliverable/linux.git] / drivers / pci / msi.c
1/*
2 * File: msi.c
3 * Purpose: PCI Message Signaled Interrupt (MSI)
4 *
5 * Copyright (C) 2003-2004 Intel
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */
8
9#include <linux/mm.h>
10#include <linux/irq.h>
11#include <linux/interrupt.h>
12#include <linux/init.h>
13#include <linux/ioport.h>
14#include <linux/smp_lock.h>
15#include <linux/pci.h>
16#include <linux/proc_fs.h>
17
18#include <asm/errno.h>
19#include <asm/io.h>
20#include <asm/smp.h>
21
22#include "pci.h"
23#include "msi.h"
24
25static DEFINE_SPINLOCK(msi_lock);
26static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
27static kmem_cache_t* msi_cachep;
28
29static int pci_msi_enable = 1;
30static int last_alloc_vector;
31static int nr_released_vectors;
32static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
33static int nr_msix_devices;
34
35#ifndef CONFIG_X86_IO_APIC
36int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
37#endif
38
39static struct msi_ops *msi_ops;
40
41int
42msi_register(struct msi_ops *ops)
43{
44 msi_ops = ops;
45 return 0;
46}
47
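/*
 * Illustrative sketch (not part of the original file): architecture code
 * is expected to provide a struct msi_ops with the setup(), teardown()
 * and target() hooks used throughout this file, and register it (for
 * example from its msi_arch_init() path) roughly like:
 *
 *	static struct msi_ops my_arch_msi_ops = {
 *		.setup    = my_arch_msi_setup,
 *		.teardown = my_arch_msi_teardown,
 *		.target   = my_arch_msi_target,
 *	};
 *
 *	msi_register(&my_arch_msi_ops);
 *
 * The hook names match how msi_ops is used below; the my_arch_*
 * identifiers are hypothetical.
 */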
48static int msi_cache_init(void)
49{
50 msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
51 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
52 if (!msi_cachep)
53 return -ENOMEM;
54
55 return 0;
56}
57
58static void msi_set_mask_bit(unsigned int vector, int flag)
59{
60 struct msi_desc *entry;
61
62 entry = (struct msi_desc *)msi_desc[vector];
63 if (!entry || !entry->dev || !entry->mask_base)
64 return;
65 switch (entry->msi_attrib.type) {
66 case PCI_CAP_ID_MSI:
67 {
68 int pos;
69 u32 mask_bits;
70
71 pos = (long)entry->mask_base;
72 pci_read_config_dword(entry->dev, pos, &mask_bits);
73 mask_bits &= ~(1);
74 mask_bits |= flag;
75 pci_write_config_dword(entry->dev, pos, mask_bits);
76 break;
77 }
78 case PCI_CAP_ID_MSIX:
79 {
80 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
81 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
82 writel(flag, entry->mask_base + offset);
83 break;
84 }
85 default:
86 break;
87 }
88}
89
90#ifdef CONFIG_SMP
91static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
92{
93 struct msi_desc *entry;
94 u32 address_hi, address_lo;
95 unsigned int irq = vector;
96 unsigned int dest_cpu = first_cpu(cpu_mask);
97
98 entry = (struct msi_desc *)msi_desc[vector];
99 if (!entry || !entry->dev)
100 return;
101
102 switch (entry->msi_attrib.type) {
103 case PCI_CAP_ID_MSI:
104 {
105 int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI);
106
107 if (!pos)
108 return;
109
110 pci_read_config_dword(entry->dev, msi_upper_address_reg(pos),
111 &address_hi);
112 pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
113 &address_lo);
114
115 msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
116
117 pci_write_config_dword(entry->dev, msi_upper_address_reg(pos),
118 address_hi);
119 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
120 address_lo);
121 set_native_irq_info(irq, cpu_mask);
122 break;
123 }
124 case PCI_CAP_ID_MSIX:
125 {
126 int offset_hi =
127 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
128 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET;
129 int offset_lo =
130 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
131 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
132
133 address_hi = readl(entry->mask_base + offset_hi);
134 address_lo = readl(entry->mask_base + offset_lo);
135
136 msi_ops->target(vector, dest_cpu, &address_hi, &address_lo);
137
138 writel(address_hi, entry->mask_base + offset_hi);
139 writel(address_lo, entry->mask_base + offset_lo);
140 set_native_irq_info(irq, cpu_mask);
141 break;
142 }
143 default:
144 break;
145 }
146}
147#else
148#define set_msi_affinity NULL
149#endif /* CONFIG_SMP */
150
151static void mask_MSI_irq(unsigned int vector)
152{
153 msi_set_mask_bit(vector, 1);
154}
155
156static void unmask_MSI_irq(unsigned int vector)
157{
158 msi_set_mask_bit(vector, 0);
159}
160
161static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
162{
163 struct msi_desc *entry;
164 unsigned long flags;
165
166 spin_lock_irqsave(&msi_lock, flags);
167 entry = msi_desc[vector];
168 if (!entry || !entry->dev) {
169 spin_unlock_irqrestore(&msi_lock, flags);
170 return 0;
171 }
172 entry->msi_attrib.state = 1; /* Mark it active */
173 spin_unlock_irqrestore(&msi_lock, flags);
174
175 return 0; /* never anything pending */
176}
177
178static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
179{
180 startup_msi_irq_wo_maskbit(vector);
181 unmask_MSI_irq(vector);
182 return 0; /* never anything pending */
183}
184
185static void shutdown_msi_irq(unsigned int vector)
186{
187 struct msi_desc *entry;
188 unsigned long flags;
189
190 spin_lock_irqsave(&msi_lock, flags);
191 entry = msi_desc[vector];
192 if (entry && entry->dev)
193 entry->msi_attrib.state = 0; /* Mark it not active */
194 spin_unlock_irqrestore(&msi_lock, flags);
195}
196
197static void end_msi_irq_wo_maskbit(unsigned int vector)
198{
199 move_native_irq(vector);
200 ack_APIC_irq();
201}
202
203static void end_msi_irq_w_maskbit(unsigned int vector)
204{
205 move_native_irq(vector);
206 unmask_MSI_irq(vector);
207 ack_APIC_irq();
208}
209
210static void do_nothing(unsigned int vector)
211{
212}
213
214/*
215 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
216 * which implement the MSI-X Capability Structure.
217 */
218static struct hw_interrupt_type msix_irq_type = {
219 .typename = "PCI-MSI-X",
220 .startup = startup_msi_irq_w_maskbit,
221 .shutdown = shutdown_msi_irq,
222 .enable = unmask_MSI_irq,
223 .disable = mask_MSI_irq,
224 .ack = mask_MSI_irq,
225 .end = end_msi_irq_w_maskbit,
226 .set_affinity = set_msi_affinity
227};
228
229/*
230 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
231 * which implement the MSI Capability Structure with
232 * Mask-and-Pending Bits.
233 */
234static struct hw_interrupt_type msi_irq_w_maskbit_type = {
235 .typename = "PCI-MSI",
236 .startup = startup_msi_irq_w_maskbit,
237 .shutdown = shutdown_msi_irq,
238 .enable = unmask_MSI_irq,
239 .disable = mask_MSI_irq,
240 .ack = mask_MSI_irq,
241 .end = end_msi_irq_w_maskbit,
242 .set_affinity = set_msi_affinity
243};
244
245/*
246 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
247 * which implement the MSI Capability Structure without
248 * Mask-and-Pending Bits.
249 */
250static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
251 .typename = "PCI-MSI",
252 .startup = startup_msi_irq_wo_maskbit,
253 .shutdown = shutdown_msi_irq,
254 .enable = do_nothing,
255 .disable = do_nothing,
256 .ack = do_nothing,
257 .end = end_msi_irq_wo_maskbit,
258 .set_affinity = set_msi_affinity
259};
260
261static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
262static int assign_msi_vector(void)
263{
264 static int new_vector_avail = 1;
265 int vector;
266 unsigned long flags;
267
268 /*
269 * msi_lock ensures that each successfully allocated MSI vector is
270 * assigned uniquely among drivers.
271 */
272 spin_lock_irqsave(&msi_lock, flags);
273
274 if (!new_vector_avail) {
275 int free_vector = 0;
276
277 /*
278 * vector_irq[] = -1 indicates that this specific vector is:
279 * - assigned for MSI (since MSI have no associated IRQ) or
280 * - assigned for legacy if less than 16, or
281 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
282 * vector_irq[] = 0 indicates that this vector, previously
283 * assigned for MSI, was freed by a hotplug remove operation.
284 * This vector will be reused for any subsequent hotplug add
285 * operation.
286 * vector_irq[] > 0 indicates that this vector is assigned for
287 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
288 * vector-to-IOxAPIC IRQ mapping.
289 */
290 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
291 if (vector_irq[vector] != 0)
292 continue;
293 free_vector = vector;
294 if (!msi_desc[vector])
295 break;
296 else
297 continue;
298 }
299 if (!free_vector) {
300 spin_unlock_irqrestore(&msi_lock, flags);
301 return -EBUSY;
302 }
303 vector_irq[free_vector] = -1;
304 nr_released_vectors--;
305 spin_unlock_irqrestore(&msi_lock, flags);
306 if (msi_desc[free_vector] != NULL) {
307 struct pci_dev *dev;
308 int tail;
309
310 /* free all linked vectors before re-assign */
311 do {
312 spin_lock_irqsave(&msi_lock, flags);
313 dev = msi_desc[free_vector]->dev;
314 tail = msi_desc[free_vector]->link.tail;
315 spin_unlock_irqrestore(&msi_lock, flags);
316 msi_free_vector(dev, tail, 1);
317 } while (free_vector != tail);
318 }
319
320 return free_vector;
321 }
322 vector = assign_irq_vector(AUTO_ASSIGN);
323 last_alloc_vector = vector;
324 if (vector == LAST_DEVICE_VECTOR)
325 new_vector_avail = 0;
326
327 spin_unlock_irqrestore(&msi_lock, flags);
328 return vector;
329}
330
331static int get_new_vector(void)
332{
333 int vector = assign_msi_vector();
334
335 if (vector > 0)
336 set_intr_gate(vector, interrupt[vector]);
337
338 return vector;
339}
340
341static int msi_init(void)
342{
343 static int status = -ENOMEM;
344
345 if (!status)
346 return status;
347
348 if (pci_msi_quirk) {
349 pci_msi_enable = 0;
350 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
351 status = -EINVAL;
352 return status;
353 }
354
355 status = msi_arch_init();
356 if (status < 0) {
357 pci_msi_enable = 0;
358 printk(KERN_WARNING
359 "PCI: MSI arch init failed. MSI disabled.\n");
360 return status;
361 }
362
363 if (! msi_ops) {
364 printk(KERN_WARNING
365 "PCI: MSI ops not registered. MSI disabled.\n");
366 status = -EINVAL;
367 return status;
368 }
369
370 last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
371 status = msi_cache_init();
372 if (status < 0) {
373 pci_msi_enable = 0;
374 printk(KERN_WARNING "PCI: MSI cache init failed\n");
375 return status;
376 }
377
378 if (last_alloc_vector < 0) {
379 pci_msi_enable = 0;
380 printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
381 status = -EBUSY;
382 return status;
383 }
384 vector_irq[last_alloc_vector] = 0;
385 nr_released_vectors++;
386
387 return status;
388}
389
390static int get_msi_vector(struct pci_dev *dev)
391{
392 return get_new_vector();
393}
394
395static struct msi_desc* alloc_msi_entry(void)
396{
397 struct msi_desc *entry;
398
399 entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
400 if (!entry)
401 return NULL;
402
403 entry->link.tail = entry->link.head = 0; /* single message */
404 entry->dev = NULL;
405
406 return entry;
407}
408
409static void attach_msi_entry(struct msi_desc *entry, int vector)
410{
411 unsigned long flags;
412
413 spin_lock_irqsave(&msi_lock, flags);
414 msi_desc[vector] = entry;
415 spin_unlock_irqrestore(&msi_lock, flags);
416}
417
418static void irq_handler_init(int cap_id, int pos, int mask)
419{
420 unsigned long flags;
421
422 spin_lock_irqsave(&irq_desc[pos].lock, flags);
423 if (cap_id == PCI_CAP_ID_MSIX)
424 irq_desc[pos].chip = &msix_irq_type;
425 else {
426 if (!mask)
427 irq_desc[pos].chip = &msi_irq_wo_maskbit_type;
428 else
429 irq_desc[pos].chip = &msi_irq_w_maskbit_type;
430 }
431 spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
432}
433
434static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
435{
436 u16 control;
437
438 pci_read_config_word(dev, msi_control_reg(pos), &control);
439 if (type == PCI_CAP_ID_MSI) {
440 /* Set enabled bits to single MSI & enable MSI_enable bit */
441 msi_enable(control, 1);
442 pci_write_config_word(dev, msi_control_reg(pos), control);
443 dev->msi_enabled = 1;
444 } else {
445 msix_enable(control);
446 pci_write_config_word(dev, msi_control_reg(pos), control);
447 dev->msix_enabled = 1;
448 }
449 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
450 /* PCI Express Endpoint device detected */
451 pci_intx(dev, 0); /* disable intx */
452 }
453}
454
455void disable_msi_mode(struct pci_dev *dev, int pos, int type)
456{
457 u16 control;
458
459 pci_read_config_word(dev, msi_control_reg(pos), &control);
460 if (type == PCI_CAP_ID_MSI) {
461 /* Set enabled bits to single MSI & enable MSI_enable bit */
462 msi_disable(control);
463 pci_write_config_word(dev, msi_control_reg(pos), control);
464 dev->msi_enabled = 0;
465 } else {
466 msix_disable(control);
467 pci_write_config_word(dev, msi_control_reg(pos), control);
468 dev->msix_enabled = 0;
469 }
470 if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
471 /* PCI Express Endpoint device detected */
472 pci_intx(dev, 1); /* enable intx */
473 }
474}
475
476static int msi_lookup_vector(struct pci_dev *dev, int type)
477{
478 int vector;
479 unsigned long flags;
480
481 spin_lock_irqsave(&msi_lock, flags);
482 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
483 if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
484 msi_desc[vector]->msi_attrib.type != type ||
485 msi_desc[vector]->msi_attrib.default_vector != dev->irq)
486 continue;
487 spin_unlock_irqrestore(&msi_lock, flags);
488 /* This pre-assigned MSI vector for this device
489 already exists. Override dev->irq with this vector */
490 dev->irq = vector;
491 return 0;
492 }
493 spin_unlock_irqrestore(&msi_lock, flags);
494
495 return -EACCES;
496}
497
498void pci_scan_msi_device(struct pci_dev *dev)
499{
500 if (!dev)
501 return;
502
503 if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
504 nr_msix_devices++;
505 else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
506 nr_reserved_vectors++;
507}
508
509#ifdef CONFIG_PM
510int pci_save_msi_state(struct pci_dev *dev)
511{
512 int pos, i = 0;
513 u16 control;
514 struct pci_cap_saved_state *save_state;
515 u32 *cap;
516
517 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
518 if (pos <= 0 || dev->no_msi)
519 return 0;
520
521 pci_read_config_word(dev, msi_control_reg(pos), &control);
522 if (!(control & PCI_MSI_FLAGS_ENABLE))
523 return 0;
524
525 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
526 GFP_KERNEL);
527 if (!save_state) {
528 printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
529 return -ENOMEM;
530 }
531 cap = &save_state->data[0];
532
533 pci_read_config_dword(dev, pos, &cap[i++]);
534 control = cap[0] >> 16;
535 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
536 if (control & PCI_MSI_FLAGS_64BIT) {
537 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
538 pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
539 } else
540 pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
541 if (control & PCI_MSI_FLAGS_MASKBIT)
542 pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
543 save_state->cap_nr = PCI_CAP_ID_MSI;
544 pci_add_saved_cap(dev, save_state);
545 return 0;
546}
547
548void pci_restore_msi_state(struct pci_dev *dev)
549{
550 int i = 0, pos;
551 u16 control;
552 struct pci_cap_saved_state *save_state;
553 u32 *cap;
554
555 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
556 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
557 if (!save_state || pos <= 0)
558 return;
559 cap = &save_state->data[0];
560
561 control = cap[i++] >> 16;
562 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
563 if (control & PCI_MSI_FLAGS_64BIT) {
564 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
565 pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
566 } else
567 pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
568 if (control & PCI_MSI_FLAGS_MASKBIT)
569 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
570 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
571 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
572 pci_remove_saved_cap(save_state);
573 kfree(save_state);
574}
575
576int pci_save_msix_state(struct pci_dev *dev)
577{
578 int pos;
579 int temp;
580 int vector, head, tail = 0;
581 u16 control;
582 struct pci_cap_saved_state *save_state;
583
584 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
585 if (pos <= 0 || dev->no_msi)
586 return 0;
587
588 /* save the capability */
589 pci_read_config_word(dev, msi_control_reg(pos), &control);
590 if (!(control & PCI_MSIX_FLAGS_ENABLE))
591 return 0;
592 save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
593 GFP_KERNEL);
594 if (!save_state) {
595 printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
596 return -ENOMEM;
597 }
598 *((u16 *)&save_state->data[0]) = control;
599
600 /* save the table */
601 temp = dev->irq;
602 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
603 kfree(save_state);
604 return -EINVAL;
605 }
606
607 vector = head = dev->irq;
608 while (head != tail) {
609 int j;
610 void __iomem *base;
611 struct msi_desc *entry;
612
613 entry = msi_desc[vector];
614 base = entry->mask_base;
615 j = entry->msi_attrib.entry_nr;
616
617 entry->address_lo_save =
618 readl(base + j * PCI_MSIX_ENTRY_SIZE +
619 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
620 entry->address_hi_save =
621 readl(base + j * PCI_MSIX_ENTRY_SIZE +
622 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
623 entry->data_save =
624 readl(base + j * PCI_MSIX_ENTRY_SIZE +
625 PCI_MSIX_ENTRY_DATA_OFFSET);
626
627 tail = msi_desc[vector]->link.tail;
628 vector = tail;
629 }
630 dev->irq = temp;
631
632 save_state->cap_nr = PCI_CAP_ID_MSIX;
633 pci_add_saved_cap(dev, save_state);
634 return 0;
635}
636
637void pci_restore_msix_state(struct pci_dev *dev)
638{
639 u16 save;
640 int pos;
641 int vector, head, tail = 0;
642 void __iomem *base;
643 int j;
644 struct msi_desc *entry;
645 int temp;
646 struct pci_cap_saved_state *save_state;
647
648 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
649 if (!save_state)
650 return;
651 save = *((u16 *)&save_state->data[0]);
652 pci_remove_saved_cap(save_state);
653 kfree(save_state);
654
655 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
656 if (pos <= 0)
657 return;
658
659 /* route the table */
660 temp = dev->irq;
661 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX))
662 return;
663 vector = head = dev->irq;
664 while (head != tail) {
665 entry = msi_desc[vector];
666 base = entry->mask_base;
667 j = entry->msi_attrib.entry_nr;
668
669 writel(entry->address_lo_save,
670 base + j * PCI_MSIX_ENTRY_SIZE +
671 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
672 writel(entry->address_hi_save,
673 base + j * PCI_MSIX_ENTRY_SIZE +
674 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
675 writel(entry->data_save,
676 base + j * PCI_MSIX_ENTRY_SIZE +
677 PCI_MSIX_ENTRY_DATA_OFFSET);
678
679 tail = msi_desc[vector]->link.tail;
680 vector = tail;
681 }
682 dev->irq = temp;
683
684 pci_write_config_word(dev, msi_control_reg(pos), save);
685 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
686}
687#endif
688
689static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
690{
691 int status;
692 u32 address_hi;
693 u32 address_lo;
694 u32 data;
695 int pos, vector = dev->irq;
696 u16 control;
697
698 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
699 pci_read_config_word(dev, msi_control_reg(pos), &control);
700
701 /* Configure MSI capability structure */
702 status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
703 if (status < 0)
704 return status;
705
706 pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
707 if (is_64bit_address(control)) {
708 pci_write_config_dword(dev,
709 msi_upper_address_reg(pos), address_hi);
710 pci_write_config_word(dev,
711 msi_data_reg(pos, 1), data);
712 } else
713 pci_write_config_word(dev,
714 msi_data_reg(pos, 0), data);
715 if (entry->msi_attrib.maskbit) {
716 unsigned int maskbits, temp;
717 /* All MSIs are unmasked by default, Mask them all */
718 pci_read_config_dword(dev,
719 msi_mask_bits_reg(pos, is_64bit_address(control)),
720 &maskbits);
721 temp = (1 << multi_msi_capable(control));
722 temp = ((temp - 1) & ~temp);
723 maskbits |= temp;
724 pci_write_config_dword(dev,
725 msi_mask_bits_reg(pos, is_64bit_address(control)),
726 maskbits);
727 }
728
729 return 0;
730}
731
732/**
733 * msi_capability_init - configure device's MSI capability structure
734 * @dev: pointer to the pci_dev data structure of MSI device function
735 *
736 * Setup the MSI capability structure of the device function with a single
737 * MSI vector, regardless of whether the device function is capable of
738 * handling multiple messages. A return of zero indicates successful setup
739 * of entry zero with the new MSI vector; a non-zero return indicates failure.
740 **/
741static int msi_capability_init(struct pci_dev *dev)
742{
743 int status;
744 struct msi_desc *entry;
745 int pos, vector;
746 u16 control;
747
748 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
749 pci_read_config_word(dev, msi_control_reg(pos), &control);
750 /* MSI Entry Initialization */
751 entry = alloc_msi_entry();
752 if (!entry)
753 return -ENOMEM;
754
755 vector = get_msi_vector(dev);
756 if (vector < 0) {
757 kmem_cache_free(msi_cachep, entry);
758 return -EBUSY;
759 }
760 entry->link.head = vector;
761 entry->link.tail = vector;
762 entry->msi_attrib.type = PCI_CAP_ID_MSI;
763 entry->msi_attrib.state = 0; /* Mark it not active */
764 entry->msi_attrib.entry_nr = 0;
765 entry->msi_attrib.maskbit = is_mask_bit_support(control);
766 entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */
767 dev->irq = vector;
768 entry->dev = dev;
769 if (is_mask_bit_support(control)) {
770 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
771 is_64bit_address(control));
772 }
773 /* Replace with MSI handler */
774 irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
775 /* Configure MSI capability structure */
776 status = msi_register_init(dev, entry);
777 if (status != 0) {
778 dev->irq = entry->msi_attrib.default_vector;
779 kmem_cache_free(msi_cachep, entry);
780 return status;
781 }
782
783 attach_msi_entry(entry, vector);
784 /* Set MSI enabled bits */
785 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
786
787 return 0;
788}
789
790/**
791 * msix_capability_init - configure device's MSI-X capability
792 * @dev: pointer to the pci_dev data structure of MSI-X device function
793 * @entries: pointer to an array of struct msix_entry entries
794 * @nvec: number of @entries
795 *
796 * Setup the MSI-X capability structure of the device function with a
797 * vector for each requested entry. A return of zero indicates the successful
798 * setup of the requested MSI-X entries with allocated vectors; non-zero otherwise.
799 **/
800static int msix_capability_init(struct pci_dev *dev,
801 struct msix_entry *entries, int nvec)
802{
803 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
804 u32 address_hi;
805 u32 address_lo;
806 u32 data;
807 int status;
808 int vector, pos, i, j, nr_entries, temp = 0;
809 unsigned long phys_addr;
810 u32 table_offset;
811 u16 control;
812 u8 bir;
813 void __iomem *base;
814
815 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
816 /* Request & Map MSI-X table region */
817 pci_read_config_word(dev, msi_control_reg(pos), &control);
818 nr_entries = multi_msix_capable(control);
819
820 pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
821 bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
822 table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
823 phys_addr = pci_resource_start (dev, bir) + table_offset;
824 base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
825 if (base == NULL)
826 return -ENOMEM;
827
828 /* MSI-X Table Initialization */
829 for (i = 0; i < nvec; i++) {
830 entry = alloc_msi_entry();
831 if (!entry)
832 break;
833 vector = get_msi_vector(dev);
834 if (vector < 0) {
835 kmem_cache_free(msi_cachep, entry);
836 break;
837 }
838
839 j = entries[i].entry;
840 entries[i].vector = vector;
841 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
842 entry->msi_attrib.state = 0; /* Mark it not active */
843 entry->msi_attrib.entry_nr = j;
844 entry->msi_attrib.maskbit = 1;
845 entry->msi_attrib.default_vector = dev->irq;
846 entry->dev = dev;
847 entry->mask_base = base;
848 if (!head) {
849 entry->link.head = vector;
850 entry->link.tail = vector;
851 head = entry;
852 } else {
853 entry->link.head = temp;
854 entry->link.tail = tail->link.tail;
855 tail->link.tail = vector;
856 head->link.head = vector;
857 }
858 temp = vector;
859 tail = entry;
860 /* Replace with MSI-X handler */
861 irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
862 /* Configure MSI-X capability structure */
863 status = msi_ops->setup(dev, vector,
864 &address_hi,
865 &address_lo,
866 &data);
867 if (status < 0)
868 break;
869
870 writel(address_lo,
871 base + j * PCI_MSIX_ENTRY_SIZE +
872 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
873 writel(address_hi,
874 base + j * PCI_MSIX_ENTRY_SIZE +
875 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
876 writel(data,
877 base + j * PCI_MSIX_ENTRY_SIZE +
878 PCI_MSIX_ENTRY_DATA_OFFSET);
879 attach_msi_entry(entry, vector);
880 }
881 if (i != nvec) {
882 i--;
883 for (; i >= 0; i--) {
884 vector = (entries + i)->vector;
885 msi_free_vector(dev, vector, 0);
886 (entries + i)->vector = 0;
887 }
888 return -EBUSY;
889 }
890 /* Set MSI-X enabled bits */
891 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
892
893 return 0;
894}
895
896/**
897 * pci_msi_supported - check whether MSI may be enabled on device
898 * @dev: pointer to the pci_dev data structure of MSI device function
899 *
900 * MSI must be globally enabled and supported by the device and its root
901 * bus. But, the root bus is not easy to find since some architectures
902 * have virtual busses on top of the PCI hierarchy (for instance the
903 * hypertransport bus), while the actual bus where MSI must be supported
904 * is below. So we test the MSI flag on all parent busses and assume
905 * that no quirk will ever set the NO_MSI flag on a non-root bus.
906 **/
907static
908int pci_msi_supported(struct pci_dev * dev)
909{
910 struct pci_bus *bus;
911
912 if (!pci_msi_enable || !dev || dev->no_msi)
913 return -EINVAL;
914
915 /* check MSI flags of all parent busses */
916 for (bus = dev->bus; bus; bus = bus->parent)
917 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
918 return -EINVAL;
919
920 return 0;
921}
922
923/**
924 * pci_enable_msi - configure device's MSI capability structure
925 * @dev: pointer to the pci_dev data structure of MSI device function
926 *
927 * Setup the MSI capability structure of the device function with
928 * a single MSI vector when its software driver requests
929 * MSI mode to be enabled on the hardware device function. A return of zero
930 * indicates successful setup of entry zero with the new MSI
931 * vector; a non-zero return indicates failure.
932 **/
933int pci_enable_msi(struct pci_dev* dev)
934{
935 int pos, temp, status;
936 u16 control;
937
938 if (pci_msi_supported(dev) < 0)
939 return -EINVAL;
940
941 temp = dev->irq;
942
943 status = msi_init();
944 if (status < 0)
945 return status;
946
947 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
948 if (!pos)
949 return -EINVAL;
950
951 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
952 /* Lookup Success */
953 unsigned long flags;
954
955 pci_read_config_word(dev, msi_control_reg(pos), &control);
956 if (control & PCI_MSI_FLAGS_ENABLE)
957 return 0; /* Already in MSI mode */
958 spin_lock_irqsave(&msi_lock, flags);
959 if (!vector_irq[dev->irq]) {
960 msi_desc[dev->irq]->msi_attrib.state = 0;
961 vector_irq[dev->irq] = -1;
962 nr_released_vectors--;
963 spin_unlock_irqrestore(&msi_lock, flags);
964 status = msi_register_init(dev, msi_desc[dev->irq]);
965 if (status == 0)
966 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
967 return status;
968 }
969 spin_unlock_irqrestore(&msi_lock, flags);
970 dev->irq = temp;
971 }
972 /* Check whether driver already requested for MSI-X vectors */
973 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
974 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
975 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
976 "Device already has MSI-X vectors assigned\n",
977 pci_name(dev));
978 dev->irq = temp;
979 return -EINVAL;
980 }
981 status = msi_capability_init(dev);
982 if (!status) {
983 if (!pos)
984 nr_reserved_vectors--; /* Only MSI capable */
985 else if (nr_msix_devices > 0)
986 nr_msix_devices--; /* Both MSI and MSI-X capable,
987 but choose enabling MSI */
988 }
989
990 return status;
991}
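/*
 * Illustrative driver-side sketch (hedged; my_dev, my_handler and the
 * "my_drv" name are hypothetical): a driver typically pairs
 * pci_enable_msi() with request_irq() on the updated dev->irq, since
 * success replaces dev->irq with the allocated MSI vector:
 *
 *	if (pci_enable_msi(my_dev) == 0) {
 *		if (request_irq(my_dev->irq, my_handler, 0, "my_drv", my_dev))
 *			pci_disable_msi(my_dev);
 *	}
 *
 * Teardown is the reverse: free_irq(my_dev->irq, my_dev) before
 * pci_disable_msi(my_dev), otherwise the state check in
 * pci_disable_msi() warns and hits BUG_ON().
 */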
992
993void pci_disable_msi(struct pci_dev* dev)
994{
995 struct msi_desc *entry;
996 int pos, default_vector;
997 u16 control;
998 unsigned long flags;
999
1000 if (!pci_msi_enable)
1001 return;
1002 if (!dev)
1003 return;
1004
1005 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1006 if (!pos)
1007 return;
1008
1009 pci_read_config_word(dev, msi_control_reg(pos), &control);
1010 if (!(control & PCI_MSI_FLAGS_ENABLE))
1011 return;
1012
1013 spin_lock_irqsave(&msi_lock, flags);
1014 entry = msi_desc[dev->irq];
1015 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
1016 spin_unlock_irqrestore(&msi_lock, flags);
1017 return;
1018 }
1019 if (entry->msi_attrib.state) {
1020 spin_unlock_irqrestore(&msi_lock, flags);
1021 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
1022 "free_irq() on MSI vector %d\n",
1023 pci_name(dev), dev->irq);
1024 BUG_ON(entry->msi_attrib.state > 0);
1025 } else {
1026 vector_irq[dev->irq] = 0; /* free it */
1027 nr_released_vectors++;
1028 default_vector = entry->msi_attrib.default_vector;
1029 spin_unlock_irqrestore(&msi_lock, flags);
1030 /* Restore dev->irq to its default pin-assertion vector */
1031 dev->irq = default_vector;
1032 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
1033 PCI_CAP_ID_MSI);
1034 }
1035}
1036
1037static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
1038{
1039 struct msi_desc *entry;
1040 int head, entry_nr, type;
1041 void __iomem *base;
1042 unsigned long flags;
1043
1044 msi_ops->teardown(vector);
1045
1046 spin_lock_irqsave(&msi_lock, flags);
1047 entry = msi_desc[vector];
1048 if (!entry || entry->dev != dev) {
1049 spin_unlock_irqrestore(&msi_lock, flags);
1050 return -EINVAL;
1051 }
1052 type = entry->msi_attrib.type;
1053 entry_nr = entry->msi_attrib.entry_nr;
1054 head = entry->link.head;
1055 base = entry->mask_base;
1056 msi_desc[entry->link.head]->link.tail = entry->link.tail;
1057 msi_desc[entry->link.tail]->link.head = entry->link.head;
1058 entry->dev = NULL;
1059 if (!reassign) {
1060 vector_irq[vector] = 0;
1061 nr_released_vectors++;
1062 }
1063 msi_desc[vector] = NULL;
1064 spin_unlock_irqrestore(&msi_lock, flags);
1065
1066 kmem_cache_free(msi_cachep, entry);
1067
1068 if (type == PCI_CAP_ID_MSIX) {
1069 if (!reassign)
1070 writel(1, base +
1071 entry_nr * PCI_MSIX_ENTRY_SIZE +
1072 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
1073
1074 if (head == vector)
1075 iounmap(base);
1076 }
1077
1078 return 0;
1079}
1080
1081static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
1082{
1083 int vector = head, tail = 0;
1084 int i, j = 0, nr_entries = 0;
1085 void __iomem *base;
1086 unsigned long flags;
1087
1088 spin_lock_irqsave(&msi_lock, flags);
1089 while (head != tail) {
1090 nr_entries++;
1091 tail = msi_desc[vector]->link.tail;
1092 if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
1093 j = vector;
1094 vector = tail;
1095 }
1096 if (*nvec > nr_entries) {
1097 spin_unlock_irqrestore(&msi_lock, flags);
1098 *nvec = nr_entries;
1099 return -EINVAL;
1100 }
1101 vector = ((j > 0) ? j : head);
1102 for (i = 0; i < *nvec; i++) {
1103 j = msi_desc[vector]->msi_attrib.entry_nr;
1104 msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
1105 vector_irq[vector] = -1; /* Mark it busy */
1106 nr_released_vectors--;
1107 entries[i].vector = vector;
1108 if (j != (entries + i)->entry) {
1109 base = msi_desc[vector]->mask_base;
1110 msi_desc[vector]->msi_attrib.entry_nr =
1111 (entries + i)->entry;
1112 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
1113 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
1114 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
1115 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
1116 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
1117 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
1118 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
1119 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
1120 writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
1121 PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
1122 base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
1123 PCI_MSIX_ENTRY_DATA_OFFSET);
1124 }
1125 vector = msi_desc[vector]->link.tail;
1126 }
1127 spin_unlock_irqrestore(&msi_lock, flags);
1128
1129 return 0;
1130}
1131
1132/**
1133 * pci_enable_msix - configure device's MSI-X capability structure
1134 * @dev: pointer to the pci_dev data structure of MSI-X device function
1135 * @entries: pointer to an array of MSI-X entries
1136 * @nvec: number of MSI-X vectors requested for allocation by device driver
1137 *
1138 * Setup the MSI-X capability structure of device function with the number
1139 * of requested vectors upon its software driver call to request for
1140 * MSI-X mode enabled on its hardware device function. A return of zero
1141 * indicates the successful configuration of MSI-X capability structure
1142 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
1143 * A return of > 0 indicates that the driver request exceeds the number
1144 * of vectors available. The driver should use the returned value to re-send
1145 * its request.
1146 **/
1147int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
1148{
1149 int status, pos, nr_entries, free_vectors;
1150 int i, j, temp;
1151 u16 control;
1152 unsigned long flags;
1153
1154 if (!entries || pci_msi_supported(dev) < 0)
1155 return -EINVAL;
1156
1157 status = msi_init();
1158 if (status < 0)
1159 return status;
1160
1161 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1162 if (!pos)
1163 return -EINVAL;
1164
1165 pci_read_config_word(dev, msi_control_reg(pos), &control);
1166 if (control & PCI_MSIX_FLAGS_ENABLE)
1167 return -EINVAL; /* Already in MSI-X mode */
1168
1169 nr_entries = multi_msix_capable(control);
1170 if (nvec > nr_entries)
1171 return -EINVAL;
1172
1173 /* Check for any invalid entries */
1174 for (i = 0; i < nvec; i++) {
1175 if (entries[i].entry >= nr_entries)
1176 return -EINVAL; /* invalid entry */
1177 for (j = i + 1; j < nvec; j++) {
1178 if (entries[i].entry == entries[j].entry)
1179 return -EINVAL; /* duplicate entry */
1180 }
1181 }
1182 temp = dev->irq;
1183 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1184 /* Lookup Success */
1185 nr_entries = nvec;
1186 /* Reroute MSI-X table */
1187 if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
1188 /* #requested > #previous-assigned */
1189 dev->irq = temp;
1190 return nr_entries;
1191 }
1192 dev->irq = temp;
1193 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
1194 return 0;
1195 }
1196 /* Check whether driver already requested for MSI vector */
1197 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
1198 !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
1199 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
1200 "Device already has an MSI vector assigned\n",
1201 pci_name(dev));
1202 dev->irq = temp;
1203 return -EINVAL;
1204 }
1205
1206 spin_lock_irqsave(&msi_lock, flags);
1207 /*
1208 * msi_lock is provided to ensure that enough vectors resources are
1209 * available before granting.
1210 */
1211 free_vectors = pci_vector_resources(last_alloc_vector,
1212 nr_released_vectors);
1213 /* Ensure that each MSI/MSI-X device has one vector reserved by
1214 default to prevent any single MSI-X driver from taking all
1215 available resources */
1216 free_vectors -= nr_reserved_vectors;
1217 /* Find the average of free vectors among MSI-X devices */
1218 if (nr_msix_devices > 0)
1219 free_vectors /= nr_msix_devices;
1220 spin_unlock_irqrestore(&msi_lock, flags);
1221
1222 if (nvec > free_vectors) {
1223 if (free_vectors > 0)
1224 return free_vectors;
1225 else
1226 return -EBUSY;
1227 }
1228
1229 status = msix_capability_init(dev, entries, nvec);
1230 if (!status && nr_msix_devices > 0)
1231 nr_msix_devices--;
1232
1233 return status;
1234}
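/*
 * Illustrative sketch of the retry pattern described above (hedged;
 * my_dev, MY_NVEC and the entries array are hypothetical):
 *
 *	struct msix_entry entries[MY_NVEC];
 *	int i, rc;
 *
 *	for (i = 0; i < MY_NVEC; i++)
 *		entries[i].entry = i;
 *	rc = pci_enable_msix(my_dev, entries, MY_NVEC);
 *	if (rc > 0)
 *		rc = pci_enable_msix(my_dev, entries, rc);
 *	if (rc == 0)
 *		... use entries[i].vector with request_irq() ...
 *
 * A positive return value reports how many vectors are currently
 * available, so the driver re-sends its request with that count.
 */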
1235
1236void pci_disable_msix(struct pci_dev* dev)
1237{
1238 int pos, temp;
1239 u16 control;
1240
1241 if (!pci_msi_enable)
1242 return;
1243 if (!dev)
1244 return;
1245
1246 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1247 if (!pos)
1248 return;
1249
1250 pci_read_config_word(dev, msi_control_reg(pos), &control);
1251 if (!(control & PCI_MSIX_FLAGS_ENABLE))
1252 return;
1253
1254 temp = dev->irq;
1255 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1256 int state, vector, head, tail = 0, warning = 0;
1257 unsigned long flags;
1258
1259 vector = head = dev->irq;
1260 spin_lock_irqsave(&msi_lock, flags);
1261 while (head != tail) {
1262 state = msi_desc[vector]->msi_attrib.state;
1263 if (state)
1264 warning = 1;
1265 else {
1266 vector_irq[vector] = 0; /* free it */
1267 nr_released_vectors++;
1268 }
1269 tail = msi_desc[vector]->link.tail;
1270 vector = tail;
1271 }
1272 spin_unlock_irqrestore(&msi_lock, flags);
1273 if (warning) {
1274 dev->irq = temp;
1275 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
1276 "free_irq() on all MSI-X vectors\n",
1277 pci_name(dev));
1278 BUG_ON(warning > 0);
1279 } else {
1280 dev->irq = temp;
1281 disable_msi_mode(dev,
1282 pci_find_capability(dev, PCI_CAP_ID_MSIX),
1283 PCI_CAP_ID_MSIX);
1284
1285 }
1286 }
1287}
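/*
 * Illustrative teardown order (hedged sketch; my_dev and entries are
 * hypothetical and assumed to match the earlier pci_enable_msix() call):
 *
 *	for (i = 0; i < nvec; i++)
 *		free_irq(entries[i].vector, my_dev);
 *	pci_disable_msix(my_dev);
 *
 * free_irq() must come first; otherwise the state check above sees the
 * vectors still in use, warns, and hits BUG_ON().
 */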
1288
1289/**
1290 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
1291 * @dev: pointer to the pci_dev data structure of MSI(X) device function
1292 *
1293 * Called during hotplug remove, when the device function
1294 * is hot-removed. All previously assigned MSI/MSI-X vectors, if
1295 * allocated for this device function, are reclaimed to the unused state
1296 * so they may be reused later.
1297 **/
1298void msi_remove_pci_irq_vectors(struct pci_dev* dev)
1299{
1300 int state, pos, temp;
1301 unsigned long flags;
1302
1303 if (!pci_msi_enable || !dev)
1304 return;
1305
1306 temp = dev->irq; /* Save IOAPIC IRQ */
1307 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1308 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
1309 spin_lock_irqsave(&msi_lock, flags);
1310 state = msi_desc[dev->irq]->msi_attrib.state;
1311 spin_unlock_irqrestore(&msi_lock, flags);
1312 if (state) {
1313 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1314 "called without free_irq() on MSI vector %d\n",
1315 pci_name(dev), dev->irq);
1316 BUG_ON(state > 0);
1317 } else /* Release MSI vector assigned to this device */
1318 msi_free_vector(dev, dev->irq, 0);
1319 dev->irq = temp; /* Restore IOAPIC IRQ */
1320 }
1321 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1322 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
1323 int vector, head, tail = 0, warning = 0;
1324 void __iomem *base = NULL;
1325
1326 vector = head = dev->irq;
1327 while (head != tail) {
1328 spin_lock_irqsave(&msi_lock, flags);
1329 state = msi_desc[vector]->msi_attrib.state;
1330 tail = msi_desc[vector]->link.tail;
1331 base = msi_desc[vector]->mask_base;
1332 spin_unlock_irqrestore(&msi_lock, flags);
1333 if (state)
1334 warning = 1;
1335 else if (vector != head) /* Release MSI-X vector */
1336 msi_free_vector(dev, vector, 0);
1337 vector = tail;
1338 }
1339 msi_free_vector(dev, vector, 0);
1340 if (warning) {
1341 iounmap(base);
1342 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1343 "called without free_irq() on all MSI-X vectors\n",
1344 pci_name(dev));
1345 BUG_ON(warning > 0);
1346 }
1347 dev->irq = temp; /* Restore IOAPIC IRQ */
1348 }
1349}
1350
1351void pci_no_msi(void)
1352{
1353 pci_msi_enable = 0;
1354}
1355
1356EXPORT_SYMBOL(pci_enable_msi);
1357EXPORT_SYMBOL(pci_disable_msi);
1358EXPORT_SYMBOL(pci_enable_msix);
1359EXPORT_SYMBOL(pci_disable_msix);