/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the ACPI table to parse the important
 * values out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	/* fields elided */
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	/* fields elided */
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	/* fields elided */
} __attribute__((packed));

bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;		/* largest PCI device id we have to handle */
LIST_HEAD(amd_iommu_unity_map);	/* a list of required unity mappings we find
				   in ACPI */
u32 amd_iommu_unmap_flush;	/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);	/* list of all AMD IOMMUs in the system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasids __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs. It is
 * indexed by the PCI device id or the HT unit id and contains information
 * about the domain the device belongs to as well as the page table root
 * pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * used to track which of them are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);

static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
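
/*
 * Worked example for tbl_size() (illustrative, not part of the original
 * source): with amd_iommu_last_bdf = 0xffff and a 32-byte device table
 * entry, (0xffff + 1) * 32 = 2 MiB, so get_order() yields 9 and the
 * function returns 1 << (PAGE_SHIFT + 9) = 2 MiB on x86 -- the size is
 * always rounded up to a whole power-of-two number of pages.
 */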

/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
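
/*
 * Note on the four accessors above (summary inferred from the code): the l1
 * and l2 register spaces are reached indirectly through PCI config space.
 * The register address (for l1 together with the l1 index in bits 16-31) is
 * written to an address port (0xf8 for l1, 0xf0 for l2), and the data is
 * then transferred through the matching data port (0xfc or 0xf4).  Setting
 * the write-enable bit in the address port (bit 31 for l1, bit 8 for l2)
 * turns the following data port access into a write instead of a read.
 */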

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required by this driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated.
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
		    &entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
		    &entry, sizeof(entry));
}
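
/*
 * Illustrative note on the size encoding above: the low bits of the device
 * table base register hold the table size in 4K pages minus one, hence
 * (dev_table_size >> 12) - 1.  A 2 MiB device table, for example, is
 * encoded as 512 - 1 = 511.
 */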

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address)
{
	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
		pr_err("AMD-Vi: Cannot reserve memory region %llx for mmio\n",
		       address);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Based on this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
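
/*
 * Worked example (illustrative): the entry length is encoded in the two
 * most significant bits of the type byte, giving 4-, 8-, 16- or 32-byte
 * entries.  For IVHD_DEV_ALIAS (0x42 = 0b01000010) the top bits are 01,
 * so the entry is 0x04 << 1 = 8 bytes long.
 */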

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU.
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header,
 * this function checks whether the ACPI table defines an even higher device
 * id.
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which
 * parse the ACPI table, so we also validate the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup tables and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously.
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}
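
/*
 * Note (inferred from the surrounding code): CMD_BUFFER_UNINITIALIZED is
 * kept as a flag bit inside cmd_buf_size.  It is cleared once the buffer
 * address has been programmed into the hardware (see
 * iommu_enable_command_buffer()) and is masked off whenever the plain size
 * is needed, e.g. when freeing the pages in free_command_buffer().
 */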

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
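
/*
 * Worked example for the two helpers above (illustrative): the device table
 * entry is treated as an array of 64-bit words, so bit 96 lands in
 * data[(96 >> 6) & 0x03] = data[1] at bit position 96 & 0x3f = 32.
 */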

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

static int add_special_device(u8 type, u8 id, u16 devid)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
		return -EINVAL;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id    = id;
	entry->devid = devid;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else
		list = &hpet_map;

	list_add_tail(&entry->list, list);

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it.
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We can only configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here.
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:
			DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:
			DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:
			DUMP_printk(" DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:
			DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid    = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:
			DUMP_printk(" DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags       = e->flags;
			devid_to    = e->ext >> 8;
			ext_flags   = 0;
			alias       = true;
			break;
		case IVHD_DEV_EXT_SELECT:
			DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:
			DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid), e->flags, e->ext);

			devid_start = e->devid;
			flags       = e->flags;
			ext_flags   = e->ext;
			alias       = false;
			break;
		case IVHD_DEV_RANGE_END:
			DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid), PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid = (e->ext >> 8) & 0xffff;
			type = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS(devid), PCI_SLOT(devid),
				    PCI_FUNC(devid));

			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			ret = add_special_device(type, handle, devid);
			if (ret)
				return ret;
			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	init_iommu_devices(iommu);

	return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:
			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk(" mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}

	return 0;
}

static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 pasids;
		u64 shift;

		shift   = iommu->features & FEATURE_PASID_MASK;
		shift >>= FEATURE_PASID_SHIFT;
		pasids  = (1 << shift);

		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}
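
	/*
	 * Illustrative example for the computation above: a PASID field
	 * value of 5 means this IOMMU supports 1 << 5 = 32 distinct PASIDs;
	 * amd_iommu_max_pasids keeps the minimum over all IOMMUs in the
	 * system, as does amd_iommu_max_glx_val for the GLX value.
	 */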

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2 = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
							PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);

	return pci_enable_device(iommu->dev);
}

static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	ret = amd_iommu_init_devices();

	print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * device.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu->dev);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;
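
	/*
	 * Note (as used here): bit 0 of m->flags is IVMD_FLAG_UNITY_MAP,
	 * and the permission bits sit above it, so shifting the flags
	 * right by one leaves just the protection bits in e->prot.
	 */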

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

/*
 * Initialize the device table so that DMA is disallowed for all devices and
 * all page faults are suppressed.
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}

static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge.
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();
	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	if (amd_iommu_irq_cache) {
		kmem_cache_destroy(amd_iommu_irq_cache);
		amd_iommu_irq_cache = NULL;
	}

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}

/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time. This situation usually happens
		 * when the BIOS is buggy and provides us with the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");

	return ret;
}

static void __init free_dma_resources(void)
{
	amd_iommu_uninit_devices();

	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}

/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 * Pass 1) Find the highest PCI device id the driver has to handle.
 *	   Based on this information the size of the data structures that
 *	   need to be allocated is determined.
 *
 * Pass 2) Initialize the data structures just allocated with the
 *	   information in the ACPI table about available AMD IOMMUs
 *	   in the system. It also maps the PCI devices in the
 *	   system to specific IOMMUs.
 *
 * Pass 3) After the basic data structures are allocated and
 *	   initialized we update them with information about memory
 *	   remapping requirements parsed out of the ACPI table in
 *	   this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;
	int i, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized;
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
	ivrs_base = NULL;
	ivrs_size = 0;

	return ret;
}

static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	if (!disable_irq_remap)
		amd_iommu_irq_remap = true;

	return true;
}

static int amd_iommu_init_dma(void)
{
	struct amd_iommu *iommu;
	int ret;

	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();

	if (ret)
		return ret;

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	amd_iommu_init_api();

	amd_iommu_init_notifier();

	return 0;
}

/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/

static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		register_syscore_ops(&amd_iommu_syscore_ops);
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}

static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = 0;

	while (init_state != state) {
		ret = state_next();
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR)
			break;
	}

	return ret;
}

#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	return iommu_go_to_state(IOMMU_ACPI_FINISHED);
}

int __init amd_iommu_supported(void)
{
	return amd_iommu_irq_remap ? 1 : 0;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif

/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_on_init_error();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}

/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just checks whether an IVRS ACPI table is present to detect
 * AMD IOMMUs.
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 0;
}

/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/

static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}

__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
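
/*
 * Example usage (illustrative): because the parser above scans every
 * position of the option string, options can be combined on the kernel
 * command line, e.g. "amd_iommu=fullflush,force_isolation".  Booting with
 * "amd_iommu_dump" additionally prints the parsed IVRS table contents via
 * DUMP_printk() during boot.
 */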

IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);

bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);