/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
/*
 * definitions for the ACPI scanning code
 */
#define DEVID(bus, devfn) (((bus) << 8) | (devfn))
#define PCI_BUS(x) (((x) >> 8) & 0xff)
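/*
 * Illustration: a device on bus 0x01 with devfn 0x08 (slot 1, function 0)
 * gets devid DEVID(0x01, 0x08) == 0x0108, and PCI_BUS(0x0108) recovers
 * bus 0x01. This 16-bit devid is also the index into the shared tables
 * declared below.
 */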
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47

#define IVHD_FLAG_HT_TUN_EN             0x00
#define IVHD_FLAG_PASSPW_EN             0x01
#define IVHD_FLAG_RESPASSPW_EN          0x02
#define IVHD_FLAG_ISOC_EN               0x03

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));
/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));
/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have to handle */
struct list_head amd_iommu_unity_map;	/* a list of required unity mappings we find in ACPI */
unsigned amd_iommu_aperture_order = 26;	/* size of aperture in power of 2 */
int amd_iommu_isolate;			/* if 1, device isolation is enabled */

struct list_head amd_iommu_list;	/* list of all AMD IOMMUs in the system */

/*
 * Pointer to the device table which is shared by all AMD IOMMUs.
 * It is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * The pd table (protection domain table) is used to find the protection domain
 * data structure a device belongs to. Indexed with the PCI device id too.
 */
struct protection_domain **amd_iommu_pd_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */
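/*
 * How the tables above fit together for a device id 'devid':
 * amd_iommu_alias_table[devid] yields the requestor id the IOMMU actually
 * sees, amd_iommu_rlookup_table[devid] yields the IOMMU responsible for
 * the device, and amd_iommu_pd_table[devid] yields its protection domain.
 */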
static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(amd_iommu_last_bdf * entry_size);

	return 1UL << shift;
}
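/*
 * Example (assuming a 32-byte device table entry and amd_iommu_last_bdf
 * == 0xffff): 0xffff * 32 is just under 2MB, get_order() rounds that up
 * to order 9, so shift becomes 12 + 9 = 21 and tbl_size() returns a 2MB
 * allocation.
 */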
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/
/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u32 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
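/*
 * The size of the device table is encoded in the low bits of the base
 * address register as the number of 4K pages minus one; that is what
 * (dev_table_size >> 12) - 1, ORed into the address above, provides.
 */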
/* Generic functions to enable/disable certain features of the IOMMU. */
static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
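/*
 * Example: iommu_feature_enable(iommu, CONTROL_IOMMU_EN) performs a
 * read-modify-write of the control register, so previously programmed
 * feature bits stay intact while the enable bit is set.
 */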
/* Function to enable the hardware */
void __init iommu_enable(struct amd_iommu *iommu)
{
	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
	print_devid(iommu->devid, 0);
	printk(" cap 0x%hx\n", iommu->cap_ptr);

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
		return NULL;

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the sizes of the shared
 * data structures are determined later.
 *
 ****************************************************************************/
/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function checks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += 0x04 << (*p >> 6);
	}

	return 0;
}
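/*
 * The stride above works because the upper two bits of an IVHD entry
 * type encode its size: 0x04 << (type >> 6) yields 4, 8, 16 or 32
 * bytes. E.g. IVHD_DEV_ALIAS (0x42) has upper bits 01b and is an
 * 8-byte entry.
 */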
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;
	end += table->length;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == ACPI_IVHD_TYPE)
			find_last_devid_from_ivhd(h);
		p += h->length;
	}

	return 0;
}
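/*
 * Reminder: ACPI tables checksum to zero, i.e. adding up every byte of
 * the table (including the checksum byte itself) must yield 0 modulo
 * 256; anything else means the table is corrupt.
 */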
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and also
 * basically initialize the hardware.
 *
 ****************************************************************************/
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
			get_order(CMD_BUFFER_SIZE));
	u64 entry;

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE;

	memset(cmd_buf, 0, CMD_BUFFER_SIZE);

	entry = (u64)virt_to_phys(cmd_buf);
	entry |= MMIO_CMD_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
			&entry, sizeof(entry));

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);

	return cmd_buf;
}
static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
			get_order(CMD_BUFFER_SIZE));
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}
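/*
 * Example: for bit 0x67 (decimal 103) the computation selects
 * i = (103 >> 5) & 0x07 = 3 and _bit = 103 & 0x1f = 7, so bit 7 of
 * data[3] is set - bit numbers address the device table entry as a
 * sequence of 32-bit words.
 */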
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
}
/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int bus = PCI_BUS(iommu->devid);
	int dev = PCI_SLOT(iommu->devid);
	int fn  = PCI_FUNC(iommu->devid);
	int cap_ptr = iommu->cap_ptr;
	u32 range;

	iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);

	range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
	iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
	u32 ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First set the recommended feature enable bits from ACPI
	 * into the IOMMU control registers
	 */
	h->flags & IVHD_FLAG_HT_TUN_EN ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	h->flags & IVHD_FLAG_PASSPW_EN ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	h->flags & IVHD_FLAG_RESPASSPW_EN ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	h->flags & IVHD_FLAG_ISOC_EN ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:
			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:
			devid = e->devid;
			set_dev_entry_from_acpi(devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:
			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(devid, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:
			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:
			devid = e->devid;
			set_dev_entry_from_acpi(devid, e->flags, e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:
			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias)
					amd_iommu_alias_table[dev_i] = devid_to;
				set_dev_entry_from_acpi(
					amd_iommu_alias_table[dev_i],
					flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += 0x04 << (e->type >> 6);
	}
}
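/*
 * Illustration of the range handling above: a *_RANGE_START (or
 * ALIAS_RANGE) entry only latches devid_start, flags and ext_flags;
 * nothing is written until the matching IVHD_DEV_RANGE_END arrives,
 * which replays the latched flags into every devid of the range.
 */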
/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u16 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}
static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
/*
 * This function glues the initialization for one IOMMU together and also
 * allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);
	list_add_tail(&iommu->list, &amd_iommu_list);

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu_set_device_table(iommu);
	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	return 0;
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	INIT_LIST_HEAD(&amd_iommu_list);

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == ACPI_IVHD_TYPE) {
			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;
			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;
	}

	return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i < amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
	case ACPI_IVMD_TYPE:
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
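/*
 * Note on e->prot: bit 0 of the IVMD flags is the unity-map marker and
 * the read/write permission bits sit directly above it, so m->flags >> 1
 * yields the protection value used for the unity mapping.
 */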
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	INIT_LIST_HEAD(&amd_iommu_unity_map);

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void __init enable_iommus(void)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list) {
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
	}
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
	return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	return -EINVAL;
}

static struct sysdev_class amd_iommu_sysdev_class = {
	.name = "amd_iommu",
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
	.id = 0,
	.cls = &amd_iommu_sysdev_class,
};
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Based on this information the sizes of the data structures
 *		that need to be allocated are determined.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After that the hardware is initialized and ready to go. In the last
 * step we do some Linux specific things like registering the driver in
 * the dma_ops interface and initializing the suspend/resume support
 * functions. Finally it prints some information about AMD IOMMUs and
 * the driver state and enables the hardware.
 */
int __init amd_iommu_init(void)
{
	int i, ret = 0;

	if (no_iommu) {
		printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
		return 0;
	}

	if (!amd_iommu_detected)
		return -ENODEV;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
				      get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
				      get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	/*
	 * Protection Domain table - maps devices to protection domains
	 * This table has the same size as the rlookup_table
	 */
	amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL,
				      get_order(rlookup_table_size));
	if (amd_iommu_pd_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL,
				      get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/*
	 * memory is allocated now; initialize the device table with all zeroes
	 * and let all alias entries point to themselves
	 */
	memset(amd_iommu_dev_table, 0, dev_table_size);
	for (i = 0; i < amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	memset(amd_iommu_pd_table, 0, rlookup_table_size);
	memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8);

	/*
	 * never allocate domain 0 because it is used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	ret = amd_iommu_init_dma_ops();
	if (ret)
		goto free;

	ret = sysdev_class_register(&amd_iommu_sysdev_class);
	if (ret)
		goto free;

	ret = sysdev_register(&device_amd_iommu);
	if (ret)
		goto free;

	enable_iommus();

	printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
			(1 << (amd_iommu_aperture_order-20)));

	printk(KERN_INFO "AMD IOMMU: device isolation ");
	if (amd_iommu_isolate)
		printk("enabled\n");
	else
		printk("disabled\n");

out:
	return ret;

free:
	if (amd_iommu_pd_alloc_bitmap)
		free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
				get_order(MAX_DOMAIN_ID/8));

	if (amd_iommu_pd_table)
		free_pages((unsigned long)amd_iommu_pd_table,
				get_order(rlookup_table_size));

	if (amd_iommu_rlookup_table)
		free_pages((unsigned long)amd_iommu_rlookup_table,
				get_order(rlookup_table_size));

	if (amd_iommu_alias_table)
		free_pages((unsigned long)amd_iommu_alias_table,
				get_order(alias_table_size));

	if (amd_iommu_dev_table)
		free_pages((unsigned long)amd_iommu_dev_table,
				get_order(dev_table_size));

	free_iommu_all();
	free_unity_maps();

	goto out;
}
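/*
 * Example for the aperture printk above: the default
 * amd_iommu_aperture_order of 26 yields 1 << (26 - 20) = 64 MB.
 */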
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks if there is an IVRS ACPI table to detect AMD
 * IOMMUs.
 *
 ****************************************************************************/
static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}
void __init amd_iommu_detect(void)
{
	if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
		return;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
		gart_iommu_aperture_disabled = 1;
		gart_iommu_aperture = 0;
#endif
	}
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options
 *
 ****************************************************************************/
static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strcmp(str, "isolate") == 0)
			amd_iommu_isolate = 1;
	}

	return 1;
}
static int __init parse_amd_iommu_size_options(char *str)
{
	for (; *str; ++str) {
		if (strcmp(str, "32M") == 0)
			amd_iommu_aperture_order = 25;
		if (strcmp(str, "64M") == 0)
			amd_iommu_aperture_order = 26;
		if (strcmp(str, "128M") == 0)
			amd_iommu_aperture_order = 27;
		if (strcmp(str, "256M") == 0)
			amd_iommu_aperture_order = 28;
		if (strcmp(str, "512M") == 0)
			amd_iommu_aperture_order = 29;
		if (strcmp(str, "1G") == 0)
			amd_iommu_aperture_order = 30;
	}

	return 1;
}
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_size=", parse_amd_iommu_size_options);
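/*
 * Usage example for the options above: booting with
 * "amd_iommu=isolate amd_iommu_size=128M" enables device isolation and
 * selects a 2^27 byte (128 MB) aperture.
 */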