author     Joerg Roedel <joerg.roedel@amd.com>    2008-07-11 11:14:21 -0400
committer  Ingo Molnar <mingo@elte.hu>            2008-07-11 12:01:09 -0400
commit     b65233a9c1da587bf19ee161982f4f0ec59941c0
tree       af4aca8de5dd844d9c1ab1de7f07d02f378d8398  /arch/x86/kernel/amd_iommu_init.c
parent     5694703f14b1f6219fce42a27229b0c7d2c23edd
x86, AMD IOMMU: add comments to the initialization code
This patch adds some comments to the AMD IOMMU initialization code to increase
its readability.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu_init.c')
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c  |  214
1 file changed, 206 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index bb0280077a3..9ddb46d7c52 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -71,6 +71,17 @@
 #define ACPI_DEVFLAG_LINT1		0x80
 #define ACPI_DEVFLAG_ATSDIS		0x10000000
 
+/*
+ * ACPI table definitions
+ *
+ * These data structures are laid over the table to parse the important values
+ * out of it.
+ */
+
+/*
+ * structure describing one IOMMU in the ACPI table. Typically followed by one
+ * or more ivhd_entrys.
+ */
 struct ivhd_header {
 	u8 type;
 	u8 flags;
@@ -83,6 +94,10 @@ struct ivhd_header {
 	u32 reserved;
 } __attribute__((packed));
 
+/*
+ * A device entry describing which devices a specific IOMMU translates and
+ * which requestor ids they use.
+ */
 struct ivhd_entry {
 	u8 type;
 	u16 devid;
@@ -90,6 +105,10 @@ struct ivhd_entry {
 	u32 ext;
 } __attribute__((packed));
 
+/*
+ * An AMD IOMMU memory definition structure. It defines things like exclusion
+ * ranges for devices and regions that should be unity mapped.
+ */
 struct ivmd_header {
 	u8 type;
 	u8 flags;
@@ -103,22 +122,66 @@ struct ivmd_header {
 
 static int __initdata amd_iommu_detected;
 
-u16 amd_iommu_last_bdf;
-struct list_head amd_iommu_unity_map;
-unsigned amd_iommu_aperture_order = 26;
-int amd_iommu_isolate;
+u16 amd_iommu_last_bdf;			/* largest PCI device id we have
+					   to handle */
+struct list_head amd_iommu_unity_map;	/* a list of required unity mappings
+					   we find in ACPI */
+unsigned amd_iommu_aperture_order = 26;	/* size of aperture in power of 2 */
+int amd_iommu_isolate;			/* if 1, device isolation is enabled */
 
-struct list_head amd_iommu_list;
+struct list_head amd_iommu_list;	/* list of all AMD IOMMUs in the
+					   system */
+
+/*
+ * Pointer to the device table which is shared by all AMD IOMMUs;
+ * it is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
 struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
 u16 *amd_iommu_alias_table;
+
+/*
+ * The rlookup table is used to find the IOMMU which is responsible
+ * for a specific device. It is also indexed by the PCI device id.
+ */
 struct amd_iommu **amd_iommu_rlookup_table;
+
+/*
+ * The pd table (protection domain table) is used to find the protection domain
+ * data structure a device belongs to. Indexed with the PCI device id too.
+ */
 struct protection_domain **amd_iommu_pd_table;
+
+/*
+ * The AMD IOMMU allows up to 2^16 different protection domains. This is a
+ * bitmap to know which ones are already in use.
+ */
 unsigned long *amd_iommu_pd_alloc_bitmap;
 
-static u32 dev_table_size;
-static u32 alias_table_size;
-static u32 rlookup_table_size;
+static u32 dev_table_size;	/* size of the device table */
+static u32 alias_table_size;	/* size of the alias table */
+static u32 rlookup_table_size;	/* size of the rlookup table */
+
+/****************************************************************************
+ *
+ * AMD IOMMU MMIO register space handling functions
+ *
+ * These functions are used to program the IOMMU device registers in
+ * MMIO space required by this driver.
+ *
+ ****************************************************************************/
 
+/*
+ * This function sets the exclusion range in the IOMMU. DMA accesses to the
+ * exclusion range are passed through untranslated.
+ */
 static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
 	u64 start = iommu->exclusion_start & PAGE_MASK;
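
The block of declarations above introduces several flat lookup tables that, per the new comments, are all indexed by the 16-bit PCI device id. The sketch below is purely illustrative (the example_* helpers and the extern redeclarations are not part of this patch); it only shows how a device id is formed from bus/device/function and how the alias and rlookup tables relate:

/*
 * Illustrative sketch only (not from this patch): how the per-device-id
 * tables commented above fit together.
 */
#include <linux/types.h>

struct amd_iommu;					/* per-IOMMU state  */
extern u16 *amd_iommu_alias_table;			/* devid -> req. id */
extern struct amd_iommu **amd_iommu_rlookup_table;	/* devid -> IOMMU   */

/* A device id packs the PCI address: bus[15:8], device[7:3], function[2:0]. */
static inline u16 example_devid(u8 bus, u8 dev, u8 fn)
{
	return ((u16)bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x07);
}

/* Look up the requestor id a device uses and the IOMMU that translates it. */
static struct amd_iommu *example_iommu_for(u16 devid, u16 *requestor_id)
{
	*requestor_id = amd_iommu_alias_table[devid];

	return amd_iommu_rlookup_table[devid];
}
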
@@ -137,6 +200,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
 			&entry, sizeof(entry));
 }
 
+/* Programs the physical address of the device table into the IOMMU hardware */
 static void __init iommu_set_device_table(struct amd_iommu *iommu)
 {
 	u32 entry;
@@ -149,6 +213,7 @@ static void __init iommu_set_device_table(struct amd_iommu *iommu)
 			&entry, sizeof(entry));
 }
 
+/* Generic functions to enable/disable certain features of the IOMMU. */
 static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 {
 	u32 ctrl;
@@ -167,6 +232,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }
 
+/* Function to enable the hardware */
 void __init iommu_enable(struct amd_iommu *iommu)
 {
 	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
@@ -176,6 +242,10 @@ void __init iommu_enable(struct amd_iommu *iommu)
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
 
+/*
+ * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
+ * the system has one.
+ */
 static u8 * __init iommu_map_mmio_space(u64 address)
 {
 	u8 *ret;
@@ -199,6 +269,19 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
 	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
 }
 
+/****************************************************************************
+ *
+ * The functions below belong to the first pass of AMD IOMMU ACPI table
+ * parsing. In this pass we try to find out the highest device id this
+ * code has to handle. Based on this information the size of the shared data
+ * structures is determined later.
+ *
+ ****************************************************************************/
+
+/*
+ * This function reads the last device id the IOMMU has to handle from the PCI
+ * capability header for this IOMMU
+ */
 static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
 {
 	u32 cap;
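
The banner above explains that the first parsing pass only records the highest device id, and that the shared tables are sized from it later. A minimal sketch of that sizing idea, assuming page-granular allocations (the helper name and the exact rounding are assumptions, not code from this patch):

/*
 * Hypothetical sketch: size a table indexed by device id so that every id
 * up to amd_iommu_last_bdf has a slot, rounded up to whole pages because
 * the tables are later allocated from the page allocator.
 */
#include <linux/mm.h>		/* get_order() */
#include <asm/page.h>		/* PAGE_SHIFT  */

extern u16 amd_iommu_last_bdf;

static inline unsigned long example_table_size(unsigned long entry_size)
{
	unsigned long bytes = ((unsigned long)amd_iommu_last_bdf + 1) * entry_size;

	return 1UL << (PAGE_SHIFT + get_order(bytes));
}
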
@@ -209,6 +292,10 @@ static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
 	return 0;
 }
 
+/*
+ * After reading the highest device id from the IOMMU PCI capability header
+ * this function checks if there is a higher device id defined in the ACPI table
+ */
 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 {
 	u8 *p = (void *)h, *end = (void *)h;
@@ -229,6 +316,7 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 		case IVHD_DEV_RANGE_END:
 		case IVHD_DEV_ALIAS:
 		case IVHD_DEV_EXT_SELECT:
+			/* all the above subfield types refer to device ids */
 			UPDATE_LAST_BDF(dev->devid);
 			break;
 		default:
@@ -242,6 +330,11 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
 	return 0;
 }
 
+/*
+ * Iterate over all IVHD entries in the ACPI table and find the highest device
+ * id which we need to handle. This is the first of three functions which parse
+ * the ACPI table. So we check the checksum here.
+ */
 static int __init find_last_devid_acpi(struct acpi_table_header *table)
 {
 	int i;
@@ -277,6 +370,20 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	return 0;
 }
 
+/****************************************************************************
+ *
+ * The following functions belong to the code path which parses the ACPI table
+ * the second time. In this ACPI parsing iteration we allocate IOMMU specific
+ * data structures, initialize the device/alias/rlookup tables and also
+ * basically initialize the hardware.
+ *
+ ****************************************************************************/
+
+/*
+ * Allocates the command buffer. This buffer is per AMD IOMMU. We can
+ * write commands to that buffer later and the IOMMU will execute them
+ * asynchronously.
+ */
 static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 {
 	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
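
The comment above alloc_command_buffer() describes a per-IOMMU buffer that the driver fills with commands and the hardware drains asynchronously. The following is only a generic ring-buffer illustration of that idea; the command layout, size and register handling are placeholders, not the AMD IOMMU command format:

/*
 * Generic ring-buffer illustration (names and sizes are made up):
 * the driver copies a fixed-size command to the tail offset, advances
 * the tail, and the hardware consumes entries from its own head pointer.
 */
#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_CMD_BUFFER_SIZE	4096	/* stand-in, not CMD_BUFFER_SIZE */

struct example_iommu_cmd {
	u32 data[4];			/* one fixed-size command slot */
};

static void example_queue_command(u8 *cmd_buf, u32 *tail,
				  struct example_iommu_cmd *cmd)
{
	memcpy(cmd_buf + *tail, cmd, sizeof(*cmd));
	*tail = (*tail + sizeof(*cmd)) % EXAMPLE_CMD_BUFFER_SIZE;
	/* the new tail value would then be written to an MMIO register */
}
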
@@ -307,6 +414,7 @@
 			get_order(CMD_BUFFER_SIZE));
 }
 
+/* sets a specific bit in the device table entry. */
 static void set_dev_entry_bit(u16 devid, u8 bit)
 {
 	int i = (bit >> 5) & 0x07;
@@ -315,6 +423,10 @@ static void set_dev_entry_bit(u16 devid, u8 bit)
 	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
 }
 
+/*
+ * This function takes the device specific flags read from the ACPI
+ * table and sets up the device table entry with that information
+ */
 static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
 {
 	if (flags & ACPI_DEVFLAG_INITPASS)
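
set_dev_entry_bit() above treats a device table entry as an array of 32-bit words: the upper bits of the bit number select the word and the low five bits select the position inside it. A standalone version of that arithmetic for reference (the eight-word entry size is an assumption based on the 0x07 mask in the code shown):

/* Sketch of the bit addressing used by set_dev_entry_bit() above. */
#include <linux/types.h>

static void example_set_entry_bit(u32 *entry_words, u8 bit)
{
	int word = (bit >> 5) & 0x07;	/* which of up to eight u32 words */
	int pos  = bit & 0x1f;		/* which bit inside that word     */

	entry_words[word] |= 1U << pos;
}
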
@@ -333,11 +445,16 @@ static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
 		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
 }
 
+/* Writes the specific IOMMU for a device into the rlookup table */
 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
 {
 	amd_iommu_rlookup_table[devid] = iommu;
 }
 
+/*
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
+ * it
+ */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 {
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
@@ -346,12 +463,22 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 		return;
 
 	if (iommu) {
+		/*
+		 * We can only configure exclusion ranges per IOMMU, not
+		 * per device. But we can enable the exclusion range per
+		 * device. This is done here
+		 */
 		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
 		iommu->exclusion_start = m->range_start;
 		iommu->exclusion_length = m->range_length;
 	}
 }
 
+/*
+ * This function reads some important data from the IOMMU PCI space and
+ * initializes the driver data structure with it. It reads the hardware
+ * capabilities and the first/last device entries
+ */
 static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 {
 	int bus = PCI_BUS(iommu->devid);
@@ -367,6 +494,10 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 	iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
 }
 
+/*
+ * Takes a pointer to an AMD IOMMU entry in the ACPI table and
+ * initializes the hardware and our data structures with it.
+ */
 static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 					struct ivhd_header *h)
 {
@@ -467,6 +598,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
 	}
 }
 
+/* Initializes the device->iommu mapping for the driver */
 static int __init init_iommu_devices(struct amd_iommu *iommu)
 {
 	u16 i;
@@ -494,6 +626,11 @@ static void __init free_iommu_all(void)
 	}
 }
 
+/*
+ * This function glues the initialization of one IOMMU together and also
+ * allocates the command buffer and programs the hardware. It does NOT
+ * enable the IOMMU. That is done afterwards.
+ */
 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 {
 	spin_lock_init(&iommu->lock);
@@ -521,6 +658,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	return 0;
 }
 
+/*
+ * Iterates over all IOMMU entries in the ACPI table, allocates the
+ * IOMMU structure and initializes it with init_iommu_one()
+ */
 static int __init init_iommu_all(struct acpi_table_header *table)
 {
 	u8 *p = (u8 *)table, *end = (u8 *)table;
@@ -555,6 +696,14 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	return 0;
 }
 
+/****************************************************************************
+ *
+ * The next functions belong to the third pass of parsing the ACPI
+ * table. In this last pass the memory mapping requirements are
+ * gathered (like exclusion and unity mapping ranges).
+ *
+ ****************************************************************************/
+
 static void __init free_unity_maps(void)
 {
 	struct unity_map_entry *entry, *next;
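
The third-pass banner above says this pass only gathers memory mapping requirements into driver lists such as amd_iommu_unity_map. As a hedged sketch, a unity mapping record collected in this pass could carry roughly the following fields (the struct and field names are assumptions, not taken from this patch):

/*
 * Hypothetical record for one required unity-mapped range, as gathered
 * from an IVMD entry during the third pass and linked into the
 * amd_iommu_unity_map list declared earlier. Field names are assumptions.
 */
#include <linux/list.h>
#include <linux/types.h>

struct example_unity_map_entry {
	struct list_head list;		/* links into amd_iommu_unity_map   */
	u16 devid_start;		/* first device id the range covers */
	u16 devid_end;			/* last device id the range covers  */
	u64 address_start;		/* start of the unity-mapped region */
	u64 address_end;		/* end of the unity-mapped region   */
	unsigned long prot;		/* protection flags for the mapping */
};
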
@@ -565,6 +714,7 @@ static void __init free_unity_maps(void)
 	}
 }
 
+/* called when we find an exclusion range definition in ACPI */
 static int __init init_exclusion_range(struct ivmd_header *m)
 {
 	int i;
@@ -588,6 +738,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 	return 0;
 }
 
+/* called for unity map ACPI definition */
 static int __init init_unity_map_range(struct ivmd_header *m)
 {
 	struct unity_map_entry *e = 0;
@@ -619,6 +770,7 @@ static int __init init_unity_map_range(struct ivmd_header *m)
 	return 0;
 }
 
+/* iterates over all memory definitions we find in the ACPI table */
 static int __init init_memory_definitions(struct acpi_table_header *table)
 {
 	u8 *p = (u8 *)table, *end = (u8 *)table;
@@ -642,6 +794,10 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 	return 0;
 }
 
+/*
+ * This function finally enables all IOMMUs found in the system after
+ * they have been initialized
+ */
 static void __init enable_iommus(void)
 {
 	struct amd_iommu *iommu;
@@ -678,6 +834,34 @@ static struct sys_device device_amd_iommu = {
 	.cls = &amd_iommu_sysdev_class,
 };
 
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ *
+ * This function basically parses the ACPI table for AMD IOMMU (IVRS)
+ * three times:
+ *
+ *	1 pass) Find the highest PCI device id the driver has to handle.
+ *		Based on this information the size of the data structures
+ *		that need to be allocated is determined.
+ *
+ *	2 pass) Initialize the data structures just allocated with the
+ *		information in the ACPI table about available AMD IOMMUs
+ *		in the system. It also maps the PCI devices in the
+ *		system to specific IOMMUs.
+ *
+ *	3 pass) After the basic data structures are allocated and
+ *		initialized we update them with information about memory
+ *		remapping requirements parsed out of the ACPI table in
+ *		this last pass.
+ *
+ * After that the hardware is initialized and ready to go. In the last
+ * step we do some Linux specific things like registering the driver in
+ * the dma_ops interface and initializing the suspend/resume support
+ * functions. Finally we print some information about the AMD IOMMUs and
+ * the driver state and enable the hardware.
+ */
 int __init amd_iommu_init(void)
 {
 	int i, ret = 0;
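
To summarize the long comment above, a deliberately simplified outline of the three passes is sketched below. Only handler names that appear in this file are used; the acpi_table_parse() calls, the error handling and the elided allocation step are assumptions about how the flow is driven, not the literal body of amd_iommu_init():

/*
 * Simplified, illustrative outline of the three-pass initialization
 * described above (assumed to live in this file; allocations, error
 * paths and dma_ops/sysdev registration are omitted).
 */
static int __init example_init_flow(void)
{
	/* 1st pass: find the highest device id, then size the shared tables */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	/* ... allocate dev/alias/rlookup/pd tables from amd_iommu_last_bdf ... */

	/* 2nd pass: set up each IOMMU, its command buffer and device entries */
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		return -ENODEV;

	/* 3rd pass: record exclusion ranges and unity mappings */
	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		return -ENODEV;

	/* finally switch the hardware on */
	enable_iommus();

	return 0;
}
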
@@ -821,6 +1005,13 @@ free:
 	goto out;
 }
 
+/****************************************************************************
+ *
+ * Early detect code. This code runs at IOMMU detection time in the DMA
+ * layer. It just looks if there is an IVRS ACPI table to detect AMD
+ * IOMMUs
+ *
+ ****************************************************************************/
 static int __init early_amd_iommu_detect(struct acpi_table_header *table)
 {
 	return 0;
@@ -841,6 +1032,13 @@ void __init amd_iommu_detect(void)
 	}
 }
 
+/****************************************************************************
+ *
+ * Parsing functions for the AMD IOMMU specific kernel command line
+ * options.
+ *
+ ****************************************************************************/
+
 static int __init parse_amd_iommu_options(char *str)
 {
 	for (; *str; ++str) {
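
parse_amd_iommu_options() above walks the option string character by character. Purely as an illustration of the kind of keyword scan such a parser performs (the option name, the flag and the return convention below are assumptions, not the driver's actual options):

/* Hypothetical keyword scan, for illustration only. */
#include <linux/init.h>
#include <linux/string.h>

static int example_flag;

static int __init example_parse_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "isolate", 7) == 0)	/* example keyword */
			example_flag = 1;
	}

	return 1;
}
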