 arch/x86/kernel/amd_iommu_init.c | 579 +++++++++++++++++++++++++++++++-------
 1 file changed, 469 insertions(+), 110 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 2a13e430437d..0cdcda35a05f 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -22,23 +22,17 @@
 #include <linux/gfp.h>
 #include <linux/list.h>
 #include <linux/sysdev.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
 #include <asm/pci-direct.h>
 #include <asm/amd_iommu_types.h>
 #include <asm/amd_iommu.h>
-#include <asm/gart.h>
+#include <asm/iommu.h>
 
 /*
  * definitions for the ACPI scanning code
  */
-#define UPDATE_LAST_BDF(x) do {\
-        if ((x) > amd_iommu_last_bdf) \
-                amd_iommu_last_bdf = (x); \
-        } while (0);
-
-#define DEVID(bus, devfn) (((bus) << 8) | (devfn))
-#define PCI_BUS(x) (((x) >> 8) & 0xff)
 #define IVRS_HEADER_LENGTH 48
-#define TBL_SIZE(x) (1 << (PAGE_SHIFT + get_order(amd_iommu_last_bdf * (x))))
 
 #define ACPI_IVHD_TYPE            0x10
 #define ACPI_IVMD_TYPE_ALL        0x20
@@ -71,6 +65,17 @@
 #define ACPI_DEVFLAG_LINT1        0x80
 #define ACPI_DEVFLAG_ATSDIS       0x10000000
 
+/*
+ * ACPI table definitions
+ *
+ * These data structures are laid over the table to parse the important values
+ * out of it.
+ */
+
+/*
+ * structure describing one IOMMU in the ACPI table. Typically followed by one
+ * or more ivhd_entrys.
+ */
 struct ivhd_header {
         u8 type;
         u8 flags;
@@ -83,6 +88,10 @@ struct ivhd_header {
         u32 reserved;
 } __attribute__((packed));
 
+/*
+ * A device entry describing which devices a specific IOMMU translates and
+ * which requestor ids they use.
+ */
 struct ivhd_entry {
         u8 type;
         u16 devid;
@@ -90,6 +99,10 @@ struct ivhd_entry {
         u32 ext;
 } __attribute__((packed));
 
+/*
+ * An AMD IOMMU memory definition structure. It defines things like exclusion
+ * ranges for devices and regions that should be unity mapped.
+ */
 struct ivmd_header {
         u8 type;
         u8 flags;
@@ -103,22 +116,81 @@ struct ivmd_header {
 
 static int __initdata amd_iommu_detected;
 
-u16 amd_iommu_last_bdf;
-struct list_head amd_iommu_unity_map;
-unsigned amd_iommu_aperture_order = 26;
-int amd_iommu_isolate;
+u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
+                                           to handle */
+LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
+                                           we find in ACPI */
+unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
+int amd_iommu_isolate;                  /* if 1, device isolation is enabled */
+bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */
+
+LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
+                                           system */
 
-struct list_head amd_iommu_list;
+/*
+ * Pointer to the device table which is shared by all AMD IOMMUs.
+ * It is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
 struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
 u16 *amd_iommu_alias_table;
+
+/*
+ * The rlookup table is used to find the IOMMU which is responsible
+ * for a specific device. It is also indexed by the PCI device id.
+ */
 struct amd_iommu **amd_iommu_rlookup_table;
+
+/*
+ * The pd table (protection domain table) is used to find the protection domain
+ * data structure a device belongs to. Indexed with the PCI device id too.
+ */
 struct protection_domain **amd_iommu_pd_table;
+
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
+ * to know which ones are already in use.
+ */
 unsigned long *amd_iommu_pd_alloc_bitmap;
 
-static u32 dev_table_size;
-static u32 alias_table_size;
-static u32 rlookup_table_size;
+static u32 dev_table_size;      /* size of the device table */
+static u32 alias_table_size;    /* size of the alias table */
+static u32 rlookup_table_size;  /* size of the rlookup table */
+
+static inline void update_last_devid(u16 devid)
+{
+        if (devid > amd_iommu_last_bdf)
+                amd_iommu_last_bdf = devid;
+}
+
+static inline unsigned long tbl_size(int entry_size)
+{
+        unsigned shift = PAGE_SHIFT +
+                         get_order(amd_iommu_last_bdf * entry_size);
+
+        return 1UL << shift;
+}
 
+/****************************************************************************
+ *
+ * AMD IOMMU MMIO register space handling functions
+ *
+ * These functions are used to program the IOMMU device registers in
+ * MMIO space required for that driver.
+ *
+ ****************************************************************************/
+
+/*
+ * This function sets the exclusion range in the IOMMU. DMA accesses to the
+ * exclusion range are passed through untranslated
+ */
 static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
         u64 start = iommu->exclusion_start & PAGE_MASK;
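
The tbl_size() helper above replaces the old TBL_SIZE() macro; both round a per-device table up to a whole power-of-two number of pages. A standalone sketch of the arithmetic, assuming PAGE_SHIFT = 12 and the 32-byte entry size of DEV_TABLE_ENTRY_SIZE (both typical x86 values, not shown in this diff):

        #include <stdio.h>

        #define PAGE_SHIFT 12

        /* userspace stand-in for the kernel's get_order(): the smallest
         * 'order' such that 2^order pages cover 'size' bytes */
        static int get_order(unsigned long size)
        {
                int order = 0;

                size = (size - 1) >> PAGE_SHIFT;
                while (size) {
                        order++;
                        size >>= 1;
                }
                return order;
        }

        int main(void)
        {
                unsigned long last_bdf = 0xffff; /* worst case: bus 0xff, devfn 0xff */
                unsigned long size = 1UL << (PAGE_SHIFT + get_order(last_bdf * 32));

                /* prints 2048 KiB: a fully populated device table costs 2 MiB */
                printf("device table size: %lu KiB\n", size >> 10);
                return 0;
        }
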
@@ -137,9 +209,10 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
                     &entry, sizeof(entry));
 }
 
+/* Programs the physical address of the device table into the IOMMU hardware */
 static void __init iommu_set_device_table(struct amd_iommu *iommu)
 {
-        u32 entry;
+        u64 entry;
 
         BUG_ON(iommu->mmio_base == NULL);
 
@@ -149,6 +222,7 @@ static void __init iommu_set_device_table(struct amd_iommu *iommu)
                     &entry, sizeof(entry));
 }
 
+/* Generic functions to enable/disable certain features of the IOMMU. */
 static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 {
         u32 ctrl;
@@ -162,20 +236,35 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 {
         u32 ctrl;
 
-        ctrl = (u64)readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+        ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
         ctrl &= ~(1 << bit);
         writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }
 
+/* Function to enable the hardware */
 void __init iommu_enable(struct amd_iommu *iommu)
 {
-        printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
-        print_devid(iommu->devid, 0);
-        printk(" cap 0x%hx\n", iommu->cap_ptr);
+        printk(KERN_INFO "AMD IOMMU: Enabling IOMMU "
+               "at %02x:%02x.%x cap 0x%hx\n",
+               iommu->dev->bus->number,
+               PCI_SLOT(iommu->dev->devfn),
+               PCI_FUNC(iommu->dev->devfn),
+               iommu->cap_ptr);
 
         iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
 
+/* Function to enable IOMMU event logging and event interrupts */
+void __init iommu_enable_event_logging(struct amd_iommu *iommu)
+{
+        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+}
+
+/*
+ * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
+ * the system has one.
+ */
 static u8 * __init iommu_map_mmio_space(u64 address)
 {
         u8 *ret;
@@ -199,16 +288,41 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
         release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
 }
 
+/****************************************************************************
+ *
+ * The functions below belong to the first pass of AMD IOMMU ACPI table
+ * parsing. In this pass we try to find out the highest device id this
+ * code has to handle. Upon this information the size of the shared data
+ * structures is determined later.
+ *
+ ****************************************************************************/
+
+/*
+ * This function calculates the length of a given IVHD entry
+ */
+static inline int ivhd_entry_length(u8 *ivhd)
+{
+        return 0x04 << (*ivhd >> 6);
+}
+
+/*
+ * This function reads the last device id the IOMMU has to handle from the PCI
+ * capability header for this IOMMU
+ */
 static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
 {
         u32 cap;
 
         cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
-        UPDATE_LAST_BDF(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
+        update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
 
         return 0;
 }
 
+/*
+ * After reading the highest device id from the IOMMU PCI capability header
+ * this function looks if there is a higher device id defined in the ACPI table
+ */
 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 {
         u8 *p = (void *)h, *end = (void *)h;
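
The ivhd_entry_length() helper introduced in this hunk decodes an IVHD entry's size class from the top two bits of its type byte: 4 bytes for types below 0x40, 8 bytes for 0x40-0x7f, and so on. A minimal sketch; the concrete type values in the asserts are assumptions based on the driver's IVHD_DEV_* constants:

        #include <assert.h>

        /* same shift trick as the kernel helper, taking the type byte directly */
        static int ivhd_entry_length(unsigned char type)
        {
                return 0x04 << (type >> 6);
        }

        int main(void)
        {
                assert(ivhd_entry_length(0x02) == 4);  /* e.g. IVHD_DEV_SELECT */
                assert(ivhd_entry_length(0x42) == 8);  /* e.g. IVHD_DEV_ALIAS  */
                assert(ivhd_entry_length(0x80) == 16); /* a 16-byte size class */
                return 0;
        }

This one encoding is what lets both parsing passes walk a table of mixed-size entries with a single "p += ivhd_entry_length(p);" step.
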
@@ -229,12 +343,13 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
                 case IVHD_DEV_RANGE_END:
                 case IVHD_DEV_ALIAS:
                 case IVHD_DEV_EXT_SELECT:
-                        UPDATE_LAST_BDF(dev->devid);
+                        /* all the above subfield types refer to device ids */
+                        update_last_devid(dev->devid);
                         break;
                 default:
                         break;
                 }
-                p += 0x04 << (*p >> 6);
+                p += ivhd_entry_length(p);
         }
 
         WARN_ON(p != end);
@@ -242,6 +357,11 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
         return 0;
 }
 
+/*
+ * Iterate over all IVHD entries in the ACPI table and find the highest device
+ * id which we need to handle. This is the first of three functions which parse
+ * the ACPI table. So we check the checksum here.
+ */
 static int __init find_last_devid_acpi(struct acpi_table_header *table)
 {
         int i;
@@ -277,19 +397,31 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
         return 0;
 }
 
+/****************************************************************************
+ *
+ * The following functions belong to the code path which parses the ACPI table
+ * the second time. In this ACPI parsing iteration we allocate IOMMU specific
+ * data structures, initialize the device/alias/rlookup table and also
+ * basically initialize the hardware.
+ *
+ ****************************************************************************/
+
+/*
+ * Allocates the command buffer. This buffer is per AMD IOMMU. We can
+ * write commands to that buffer later and the IOMMU will execute them
+ * asynchronously
+ */
 static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
-                        get_order(CMD_BUFFER_SIZE));
-        u64 entry = 0;
+        u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                        get_order(CMD_BUFFER_SIZE));
+        u64 entry;
 
         if (cmd_buf == NULL)
                 return NULL;
 
         iommu->cmd_buf_size = CMD_BUFFER_SIZE;
 
-        memset(cmd_buf, 0, CMD_BUFFER_SIZE);
-
         entry = (u64)virt_to_phys(cmd_buf);
         entry |= MMIO_CMD_SIZE_512;
         memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
@@ -302,11 +434,35 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
-        if (iommu->cmd_buf)
-                free_pages((unsigned long)iommu->cmd_buf,
-                                get_order(CMD_BUFFER_SIZE));
+        free_pages((unsigned long)iommu->cmd_buf,
+                   get_order(iommu->cmd_buf_size));
+}
+
+/* allocates the memory where the IOMMU will log its events to */
+static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
+{
+        u64 entry;
+        iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                get_order(EVT_BUFFER_SIZE));
+
+        if (iommu->evt_buf == NULL)
+                return NULL;
+
+        entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+                    &entry, sizeof(entry));
+
+        iommu->evt_buf_size = EVT_BUFFER_SIZE;
+
+        return iommu->evt_buf;
+}
+
+static void __init free_event_buffer(struct amd_iommu *iommu)
+{
+        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
+/* sets a specific bit in the device table entry. */
 static void set_dev_entry_bit(u16 devid, u8 bit)
 {
         int i = (bit >> 5) & 0x07;
@@ -315,7 +471,18 @@ static void set_dev_entry_bit(u16 devid, u8 bit)
         amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
 }
 
-static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
+/* Writes the specific IOMMU for a device into the rlookup table */
+static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+{
+        amd_iommu_rlookup_table[devid] = iommu;
+}
+
+/*
+ * This function takes the device specific flags read from the ACPI
+ * table and sets up the device table entry with that information
+ */
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+                                           u16 devid, u32 flags, u32 ext_flags)
 {
         if (flags & ACPI_DEVFLAG_INITPASS)
                 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
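
set_dev_entry_bit(), at the top of the hunk above, addresses one bit inside a 256-bit device table entry stored as u32 words: bits 5-7 of the bit number pick the word, bits 0-4 the position inside it. A sketch under that assumption (the 8-word layout mirrors struct dev_table_entry from amd_iommu_types.h):

        #include <assert.h>
        #include <stdint.h>

        struct dev_table_entry { uint32_t data[8]; };   /* 8 x 32 = 256 bits */

        static void set_entry_bit(struct dev_table_entry *e, uint8_t bit)
        {
                int i    = (bit >> 5) & 0x07;   /* which u32 word      */
                int _bit = bit & 0x1f;          /* which bit inside it */

                e->data[i] |= 1U << _bit;
        }

        int main(void)
        {
                struct dev_table_entry e = { { 0 } };

                set_entry_bit(&e, 97);          /* bit 97 = word 3, bit 1 */
                assert(e.data[3] == (1U << 1));
                return 0;
        }
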
@@ -331,13 +498,14 @@ static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
                 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
         if (flags & ACPI_DEVFLAG_LINT1)
                 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
-}
 
-static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
-{
-        amd_iommu_rlookup_table[devid] = iommu;
+        set_iommu_for_device(iommu, devid);
 }
 
+/*
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
+ * it
+ */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 {
         struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
@@ -346,27 +514,45 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
                 return;
 
         if (iommu) {
+                /*
+                 * We can only configure exclusion ranges per IOMMU, not
+                 * per device. But we can enable the exclusion range per
+                 * device. This is done here
+                 */
                 set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
                 iommu->exclusion_start = m->range_start;
                 iommu->exclusion_length = m->range_length;
         }
 }
 
+/*
+ * This function reads some important data from the IOMMU PCI space and
+ * initializes the driver data structure with it. It reads the hardware
+ * capabilities and the first/last device entries
+ */
 static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 {
-        int bus = PCI_BUS(iommu->devid);
-        int dev = PCI_SLOT(iommu->devid);
-        int fn  = PCI_FUNC(iommu->devid);
         int cap_ptr = iommu->cap_ptr;
-        u32 range;
+        u32 range, misc;
 
-        iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);
-
-        range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
-        iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
-        iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
+        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+                              &iommu->cap);
+        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
+                              &range);
+        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+                              &misc);
+
+        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+                                         MMIO_GET_FD(range));
+        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+                                        MMIO_GET_LD(range));
+        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
 }
 
+/*
+ * Takes a pointer to an AMD IOMMU entry in the ACPI table and
+ * initializes the hardware and our data structures with it.
+ */
 static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                         struct ivhd_header *h)
 {
@@ -374,7 +560,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
         u8 *end = p, flags = 0;
         u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
         u32 ext_flags = 0;
-        bool alias = 0;
+        bool alias = false;
         struct ivhd_entry *e;
 
         /*
@@ -414,22 +600,23 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                 case IVHD_DEV_ALL:
                         for (dev_i = iommu->first_device;
                                         dev_i <= iommu->last_device; ++dev_i)
-                                set_dev_entry_from_acpi(dev_i, e->flags, 0);
+                                set_dev_entry_from_acpi(iommu, dev_i,
+                                                        e->flags, 0);
                         break;
                 case IVHD_DEV_SELECT:
                         devid = e->devid;
-                        set_dev_entry_from_acpi(devid, e->flags, 0);
+                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                         break;
                 case IVHD_DEV_SELECT_RANGE_START:
                         devid_start = e->devid;
                         flags = e->flags;
                         ext_flags = 0;
-                        alias = 0;
+                        alias = false;
                         break;
                 case IVHD_DEV_ALIAS:
                         devid = e->devid;
                         devid_to = e->ext >> 8;
-                        set_dev_entry_from_acpi(devid, e->flags, 0);
+                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                         amd_iommu_alias_table[devid] = devid_to;
                         break;
                 case IVHD_DEV_ALIAS_RANGE:
@@ -437,24 +624,25 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                         flags = e->flags;
                         devid_to = e->ext >> 8;
                         ext_flags = 0;
-                        alias = 1;
+                        alias = true;
                         break;
                 case IVHD_DEV_EXT_SELECT:
                         devid = e->devid;
-                        set_dev_entry_from_acpi(devid, e->flags, e->ext);
+                        set_dev_entry_from_acpi(iommu, devid, e->flags,
+                                                e->ext);
                         break;
                 case IVHD_DEV_EXT_SELECT_RANGE:
                         devid_start = e->devid;
                         flags = e->flags;
                         ext_flags = e->ext;
-                        alias = 0;
+                        alias = false;
                         break;
                 case IVHD_DEV_RANGE_END:
                         devid = e->devid;
                         for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
                                 if (alias)
                                         amd_iommu_alias_table[dev_i] = devid_to;
-                                set_dev_entry_from_acpi(
+                                set_dev_entry_from_acpi(iommu,
                                         amd_iommu_alias_table[dev_i],
                                         flags, ext_flags);
                         }
@@ -463,10 +651,11 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                         break;
                 }
 
-                p += 0x04 << (e->type >> 6);
+                p += ivhd_entry_length(p);
         }
 }
 
+/* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
         u16 i;
@@ -480,6 +669,7 @@ static int __init init_iommu_devices(struct amd_iommu *iommu)
 static void __init free_iommu_one(struct amd_iommu *iommu)
 {
         free_command_buffer(iommu);
+        free_event_buffer(iommu);
         iommu_unmap_mmio_space(iommu);
 }
 
@@ -494,6 +684,11 @@ static void __init free_iommu_all(void)
         }
 }
 
+/*
+ * This function glues the initialization function for one IOMMU
+ * together and also allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+ */
 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 {
         spin_lock_init(&iommu->lock);
@@ -502,8 +697,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
         /*
          * Copy data from ACPI table entry to the iommu struct
         */
-        iommu->devid = h->devid;
+        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
+        if (!iommu->dev)
+                return 1;
+
         iommu->cap_ptr = h->cap_ptr;
+        iommu->pci_seg = h->pci_seg;
         iommu->mmio_phys = h->mmio_phys;
         iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
         if (!iommu->mmio_base)
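
The pci_get_bus_and_slot() call above splits the 16-bit device id the driver uses everywhere: bus number in the high byte, devfn in the low byte. A sketch of the encoding; the macro names are illustrative stand-ins for the driver's calc_devid()/PCI_BUS() and the generic PCI_SLOT()/PCI_FUNC():

        #include <assert.h>

        #define CALC_DEVID(bus, devfn)  (((bus) << 8) | (devfn))
        #define DEVID_BUS(x)            (((x) >> 8) & 0xff)
        #define DEVID_DEVFN(x)          ((x) & 0xff)
        #define SLOT(devfn)             (((devfn) >> 3) & 0x1f)
        #define FUNC(devfn)             ((devfn) & 0x07)

        int main(void)
        {
                unsigned devid = CALC_DEVID(0x01, (0x02 << 3) | 0x3); /* 01:02.3 */

                assert(devid == 0x0113);
                assert(DEVID_BUS(devid) == 0x01);
                assert(SLOT(DEVID_DEVFN(devid)) == 0x02);
                assert(FUNC(DEVID_DEVFN(devid)) == 0x03);
                return 0;
        }

This dense 0..0xffff id space is also why the rlookup, alias and device tables can simply be indexed by devid.
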
@@ -514,13 +713,23 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
         if (!iommu->cmd_buf)
                 return -ENOMEM;
 
+        iommu->evt_buf = alloc_event_buffer(iommu);
+        if (!iommu->evt_buf)
+                return -ENOMEM;
+
+        iommu->int_enabled = false;
+
         init_iommu_from_pci(iommu);
         init_iommu_from_acpi(iommu, h);
         init_iommu_devices(iommu);
 
-        return 0;
+        return pci_enable_device(iommu->dev);
 }
 
+/*
+ * Iterates over all IOMMU entries in the ACPI table, allocates the
+ * IOMMU structure and initializes it with init_iommu_one()
+ */
 static int __init init_iommu_all(struct acpi_table_header *table)
 {
         u8 *p = (u8 *)table, *end = (u8 *)table;
@@ -528,8 +737,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
         struct amd_iommu *iommu;
         int ret;
 
-        INIT_LIST_HEAD(&amd_iommu_list);
-
         end += table->length;
         p += IVRS_HEADER_LENGTH;
 
@@ -555,6 +762,103 @@ static int __init init_iommu_all(struct acpi_table_header *table)
         return 0;
 }
 
+/****************************************************************************
+ *
+ * The following functions initialize the MSI interrupts for all IOMMUs
+ * in the system. It's a bit challenging because there could be multiple
+ * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
+ * pci_dev.
+ *
+ ****************************************************************************/
+
+static int __init iommu_setup_msix(struct amd_iommu *iommu)
+{
+        struct amd_iommu *curr;
+        struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
+        int nvec = 0, i;
+
+        list_for_each_entry(curr, &amd_iommu_list, list) {
+                if (curr->dev == iommu->dev) {
+                        entries[nvec].entry = curr->evt_msi_num;
+                        entries[nvec].vector = 0;
+                        curr->int_enabled = true;
+                        nvec++;
+                }
+        }
+
+        if (pci_enable_msix(iommu->dev, entries, nvec)) {
+                pci_disable_msix(iommu->dev);
+                return 1;
+        }
+
+        for (i = 0; i < nvec; ++i) {
+                int r = request_irq(entries->vector, amd_iommu_int_handler,
+                                    IRQF_SAMPLE_RANDOM,
+                                    "AMD IOMMU",
+                                    NULL);
+                if (r)
+                        goto out_free;
+        }
+
+        return 0;
+
+out_free:
+        for (i -= 1; i >= 0; --i)
+                free_irq(entries->vector, NULL);
+
+        pci_disable_msix(iommu->dev);
+
+        return 1;
+}
+
+static int __init iommu_setup_msi(struct amd_iommu *iommu)
+{
+        int r;
+        struct amd_iommu *curr;
+
+        list_for_each_entry(curr, &amd_iommu_list, list) {
+                if (curr->dev == iommu->dev)
+                        curr->int_enabled = true;
+        }
+
+
+        if (pci_enable_msi(iommu->dev))
+                return 1;
+
+        r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
+                        IRQF_SAMPLE_RANDOM,
+                        "AMD IOMMU",
+                        NULL);
+
+        if (r) {
+                pci_disable_msi(iommu->dev);
+                return 1;
+        }
+
+        return 0;
+}
+
+static int __init iommu_init_msi(struct amd_iommu *iommu)
+{
+        if (iommu->int_enabled)
+                return 0;
+
+        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
+                return iommu_setup_msix(iommu);
+        else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+                return iommu_setup_msi(iommu);
+
+        return 1;
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the third pass of parsing the ACPI
+ * table. In this last pass the memory mapping requirements are
+ * gathered (like exclusion and unity mapping ranges).
+ *
+ ****************************************************************************/
+
 static void __init free_unity_maps(void)
 {
         struct unity_map_entry *entry, *next;
@@ -565,6 +869,7 @@ static void __init free_unity_maps(void)
         }
 }
 
+/* called when we find an exclusion range definition in ACPI */
 static int __init init_exclusion_range(struct ivmd_header *m)
 {
         int i;
@@ -574,7 +879,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
                 set_device_exclusion_range(m->devid, m);
                 break;
         case ACPI_IVMD_TYPE_ALL:
-                for (i = 0; i < amd_iommu_last_bdf; ++i)
+                for (i = 0; i <= amd_iommu_last_bdf; ++i)
                         set_device_exclusion_range(i, m);
                 break;
         case ACPI_IVMD_TYPE_RANGE:
@@ -588,6 +893,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
         return 0;
 }
 
+/* called for unity map ACPI definition */
 static int __init init_unity_map_range(struct ivmd_header *m)
 {
         struct unity_map_entry *e = 0;
@@ -619,13 +925,12 @@ static int __init init_unity_map_range(struct ivmd_header *m)
         return 0;
 }
 
+/* iterates over all memory definitions we find in the ACPI table */
 static int __init init_memory_definitions(struct acpi_table_header *table)
 {
         u8 *p = (u8 *)table, *end = (u8 *)table;
         struct ivmd_header *m;
 
-        INIT_LIST_HEAD(&amd_iommu_unity_map);
-
         end += table->length;
         p += IVRS_HEADER_LENGTH;
 
@@ -642,12 +947,32 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
         return 0;
 }
 
+/*
+ * Init the device table to not allow DMA access for devices and
+ * suppress all page faults
+ */
+static void init_device_table(void)
+{
+        u16 devid;
+
+        for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+                set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+                set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+        }
+}
+
+/*
+ * This function finally enables all IOMMUs found in the system after
+ * they have been initialized
+ */
 static void __init enable_iommus(void)
 {
         struct amd_iommu *iommu;
 
         list_for_each_entry(iommu, &amd_iommu_list, list) {
                 iommu_set_exclusion_range(iommu);
+                iommu_init_msi(iommu);
+                iommu_enable_event_logging(iommu);
                 iommu_enable(iommu);
         }
 }
@@ -678,6 +1003,34 @@ static struct sys_device device_amd_iommu = {
         .cls = &amd_iommu_sysdev_class,
 };
 
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ *
+ * This function basically parses the ACPI table for AMD IOMMU (IVRS)
+ * three times:
+ *
+ * 1 pass) Find the highest PCI device id the driver has to handle.
+ *         Based on this information the sizes of the data structures
+ *         that need to be allocated are determined.
+ *
+ * 2 pass) Initialize the data structures just allocated with the
+ *         information in the ACPI table about available AMD IOMMUs
+ *         in the system. It also maps the PCI devices in the
+ *         system to specific IOMMUs
+ *
+ * 3 pass) After the basic data structures are allocated and
+ *         initialized we update them with information about memory
+ *         remapping requirements parsed out of the ACPI table in
+ *         this last pass.
+ *
+ * After that the hardware is initialized and ready to go. In the last
+ * step we do some Linux specific things like registering the driver in
+ * the dma_ops interface and initializing the suspend/resume support
+ * functions. Finally it prints some information about AMD IOMMUs and
+ * the driver state and enables the hardware.
+ */
 int __init amd_iommu_init(void)
 {
         int i, ret = 0;
@@ -699,14 +1052,14 @@ int __init amd_iommu_init(void)
         if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
                 return -ENODEV;
 
-        dev_table_size = TBL_SIZE(DEV_TABLE_ENTRY_SIZE);
-        alias_table_size = TBL_SIZE(ALIAS_TABLE_ENTRY_SIZE);
-        rlookup_table_size = TBL_SIZE(RLOOKUP_TABLE_ENTRY_SIZE);
+        dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
+        alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
+        rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
 
         ret = -ENOMEM;
 
         /* Device table - directly used by all IOMMUs */
-        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL,
+        amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                               get_order(dev_table_size));
         if (amd_iommu_dev_table == NULL)
                 goto out;
@@ -730,27 +1083,26 @@ int __init amd_iommu_init(void)
          * Protection Domain table - maps devices to protection domains
          * This table has the same size as the rlookup_table
          */
-        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL,
+        amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(rlookup_table_size));
         if (amd_iommu_pd_table == NULL)
                 goto free;
 
-        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL,
+        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
+                                            GFP_KERNEL | __GFP_ZERO,
                                             get_order(MAX_DOMAIN_ID/8));
         if (amd_iommu_pd_alloc_bitmap == NULL)
                 goto free;
 
+        /* init the device table */
+        init_device_table();
+
         /*
-         * memory is allocated now; initialize the device table with all zeroes
-         * and let all alias entries point to itself
+         * let all alias entries point to themselves
          */
-        memset(amd_iommu_dev_table, 0, dev_table_size);
-        for (i = 0; i < amd_iommu_last_bdf; ++i)
+        for (i = 0; i <= amd_iommu_last_bdf; ++i)
                 amd_iommu_alias_table[i] = i;
 
-        memset(amd_iommu_pd_table, 0, rlookup_table_size);
-        memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8);
-
         /*
          * never allocate domain 0 because its used as the non-allocated and
          * error value placeholder
@@ -768,15 +1120,15 @@ int __init amd_iommu_init(void)
         if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
                 goto free;
 
-        ret = amd_iommu_init_dma_ops();
+        ret = sysdev_class_register(&amd_iommu_sysdev_class);
         if (ret)
                 goto free;
 
-        ret = sysdev_class_register(&amd_iommu_sysdev_class);
+        ret = sysdev_register(&device_amd_iommu);
         if (ret)
                 goto free;
 
-        ret = sysdev_register(&device_amd_iommu);
+        ret = amd_iommu_init_dma_ops();
         if (ret)
                 goto free;
 
@@ -791,28 +1143,29 @@ int __init amd_iommu_init(void)
         else
                 printk("disabled\n");
 
+        if (amd_iommu_unmap_flush)
+                printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
+        else
+                printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
+
 out:
         return ret;
 
 free:
-        if (amd_iommu_pd_alloc_bitmap)
-                free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1);
+        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+                   get_order(MAX_DOMAIN_ID/8));
 
-        if (amd_iommu_pd_table)
-                free_pages((unsigned long)amd_iommu_pd_table,
-                                get_order(rlookup_table_size));
+        free_pages((unsigned long)amd_iommu_pd_table,
+                   get_order(rlookup_table_size));
 
-        if (amd_iommu_rlookup_table)
-                free_pages((unsigned long)amd_iommu_rlookup_table,
-                                get_order(rlookup_table_size));
+        free_pages((unsigned long)amd_iommu_rlookup_table,
+                   get_order(rlookup_table_size));
 
-        if (amd_iommu_alias_table)
-                free_pages((unsigned long)amd_iommu_alias_table,
-                                get_order(alias_table_size));
+        free_pages((unsigned long)amd_iommu_alias_table,
+                   get_order(alias_table_size));
 
-        if (amd_iommu_dev_table)
-                free_pages((unsigned long)amd_iommu_dev_table,
-                                get_order(dev_table_size));
+        free_pages((unsigned long)amd_iommu_dev_table,
+                   get_order(dev_table_size));
 
         free_iommu_all();
 
@@ -821,6 +1174,13 @@ free:
         goto out;
 }
 
+/****************************************************************************
+ *
+ * Early detect code. This code runs at IOMMU detection time in the DMA
+ * layer. It just looks if there is an IVRS ACPI table to detect AMD
+ * IOMMUs
+ *
+ ****************************************************************************/
 static int __init early_amd_iommu_detect(struct acpi_table_header *table)
 {
         return 0;
@@ -828,7 +1188,7 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table)
 
 void __init amd_iommu_detect(void)
 {
-        if (swiotlb || no_iommu || iommu_detected)
+        if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture))
                 return;
 
         if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
@@ -841,11 +1201,20 @@ void __init amd_iommu_detect(void)
         }
 }
 
+/****************************************************************************
+ *
+ * Parsing functions for the AMD IOMMU specific kernel command line
+ * options.
+ *
+ ****************************************************************************/
+
 static int __init parse_amd_iommu_options(char *str)
 {
         for (; *str; ++str) {
-                if (strcmp(str, "isolate") == 0)
+                if (strncmp(str, "isolate", 7) == 0)
                         amd_iommu_isolate = 1;
+                if (strncmp(str, "fullflush", 11) == 0)
+                        amd_iommu_unmap_flush = true;
         }
 
         return 1;
@@ -853,20 +1222,10 @@ static int __init parse_amd_iommu_options(char *str)
 
 static int __init parse_amd_iommu_size_options(char *str)
 {
-        for (; *str; ++str) {
-                if (strcmp(str, "32M") == 0)
-                        amd_iommu_aperture_order = 25;
-                if (strcmp(str, "64M") == 0)
-                        amd_iommu_aperture_order = 26;
-                if (strcmp(str, "128M") == 0)
-                        amd_iommu_aperture_order = 27;
-                if (strcmp(str, "256M") == 0)
-                        amd_iommu_aperture_order = 28;
-                if (strcmp(str, "512M") == 0)
-                        amd_iommu_aperture_order = 29;
-                if (strcmp(str, "1G") == 0)
-                        amd_iommu_aperture_order = 30;
-        }
+        unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
+
+        if ((order > 24) && (order < 31))
+                amd_iommu_aperture_order = order;
 
         return 1;
 }
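
The rewritten parse_amd_iommu_size_options() accepts any human-readable size string and keeps the aperture order only when it falls in 25-30, i.e. the old 32M-1G range; anything else leaves the 64M default. A userspace sketch of the computation; the simplified memparse() below handles only K/M/G suffixes, whereas the kernel's does more:

        #include <stdio.h>
        #include <stdlib.h>

        #define PAGE_SHIFT 12

        static int get_order(unsigned long size)
        {
                int order = 0;

                size = (size - 1) >> PAGE_SHIFT;
                while (size) {
                        order++;
                        size >>= 1;
                }
                return order;
        }

        /* minimal stand-in for the kernel's memparse(): number plus K/M/G */
        static unsigned long memparse(const char *s, char **end)
        {
                unsigned long v = strtoul(s, end, 0);

                switch (**end) {
                case 'G': v <<= 10; /* fall through */
                case 'M': v <<= 10; /* fall through */
                case 'K': v <<= 10; (*end)++;
                }
                return v;
        }

        int main(void)
        {
                char str[] = "64M", *end;
                unsigned order = PAGE_SHIFT + get_order(memparse(str, &end));

                if ((order > 24) && (order < 31))
                        printf("amd_iommu_size=%s -> aperture order %u\n",
                               str, order);    /* prints order 26 */
                return 0;
        }

So "amd_iommu_size=64M" still yields order 26 as before, but any size in the accepted range now works, rounded up to the next power of two.
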