Diffstat (limited to 'arch/x86/kernel/amd_iommu_init.c')
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 875
1 files changed, 875 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
new file mode 100644
index 000000000000..2a13e430437d
--- /dev/null
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -0,0 +1,875 @@
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <asm/pci-direct.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>
#include <asm/gart.h>

/*
 * definitions for the ACPI scanning code
 */
#define UPDATE_LAST_BDF(x) do {			\
	if ((x) > amd_iommu_last_bdf)		\
		amd_iommu_last_bdf = (x);	\
	} while (0)

#define DEVID(bus, devfn) (((bus) << 8) | (devfn))
#define PCI_BUS(x) (((x) >> 8) & 0xff)
#define IVRS_HEADER_LENGTH 48
/* the tables must hold entries for device ids 0 .. amd_iommu_last_bdf */
#define TBL_SIZE(x) (1 << (PAGE_SHIFT + get_order((amd_iommu_last_bdf + 1) * (x))))

#define ACPI_IVHD_TYPE 0x10
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22

#define IVHD_DEV_ALL 0x01
#define IVHD_DEV_SELECT 0x02
#define IVHD_DEV_SELECT_RANGE_START 0x03
#define IVHD_DEV_RANGE_END 0x04
#define IVHD_DEV_ALIAS 0x42
#define IVHD_DEV_ALIAS_RANGE 0x43
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47

/* IVHD header flags are bit masks and are tested with '&' below */
#define IVHD_FLAG_HT_TUN_EN 0x01
#define IVHD_FLAG_PASSPW_EN 0x02
#define IVHD_FLAG_RESPASSPW_EN 0x04
#define IVHD_FLAG_ISOC_EN 0x08

#define IVMD_FLAG_EXCL_RANGE 0x08
#define IVMD_FLAG_UNITY_MAP 0x01

#define ACPI_DEVFLAG_INITPASS 0x01
#define ACPI_DEVFLAG_EXTINT 0x02
#define ACPI_DEVFLAG_NMI 0x04
#define ACPI_DEVFLAG_SYSMGT1 0x10
#define ACPI_DEVFLAG_SYSMGT2 0x20
#define ACPI_DEVFLAG_LINT0 0x40
#define ACPI_DEVFLAG_LINT1 0x80
#define ACPI_DEVFLAG_ATSDIS 0x10000000

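/*
 * The IVHD header describes one IOMMU unit in the ACPI IVRS table.
 * These structures mirror the table layout byte for byte, hence the
 * packed attribute.
 */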
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));

struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

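/*
 * An IVMD entry describes a memory definition for one device or a range
 * of devices: either an exclusion range or a unity mapped range.
 */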
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

static int __initdata amd_iommu_detected;

u16 amd_iommu_last_bdf;
struct list_head amd_iommu_unity_map;
unsigned amd_iommu_aperture_order = 26;
int amd_iommu_isolate;

struct list_head amd_iommu_list;
struct dev_table_entry *amd_iommu_dev_table;
u16 *amd_iommu_alias_table;
struct amd_iommu **amd_iommu_rlookup_table;
struct protection_domain **amd_iommu_pd_table;
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;
static u32 alias_table_size;
static u32 rlookup_table_size;

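/*
 * Program the exclusion base and limit registers of one IOMMU. Addresses
 * inside the window bypass translation for devices that have the
 * exclusion bit set in their device table entry. A zero exclusion_start
 * means no range was set up, so nothing is written.
 */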
static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

static void __init iommu_set_device_table(struct amd_iommu *iommu)
{
	u32 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

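/*
 * The two helpers below set or clear a single feature bit in the IOMMU
 * control register with a read-modify-write cycle.
 */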
static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

void __init iommu_enable(struct amd_iommu *iommu)
{
	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at ");
	print_devid(iommu->devid, 0);
	printk(" cap 0x%hx\n", iommu->cap_ptr);

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static u8 * __init iommu_map_mmio_space(u64 address)
{
	u8 *ret;

	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu"))
		return NULL;

	ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
	if (ret != NULL)
		return ret;

	release_mem_region(address, MMIO_REGION_LENGTH);

	return NULL;
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

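/*
 * The following functions scan the IVRS table and the IOMMU's own PCI
 * capability block for the highest device id that must be handled. That
 * value (amd_iommu_last_bdf) later determines how large the shared
 * device, alias, and rlookup tables have to be.
 */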
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	UPDATE_LAST_BDF(DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			UPDATE_LAST_BDF(dev->devid);
			break;
		default:
			break;
		}
		p += 0x04 << (*p >> 6);
	}

	WARN_ON(p != end);

	return 0;
}

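/*
 * Iterate over all IVHD entries in the IVRS table to find the last
 * device id. The table checksum is validated here once so the later
 * parsing passes can trust the data.
 */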
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

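/*
 * Allocate the buffer the driver uses to post commands to the IOMMU,
 * program its base address and size into the hardware, and enable
 * command processing.
 */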
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL,
			get_order(CMD_BUFFER_SIZE));
	u64 entry = 0;

	if (cmd_buf == NULL)
		return NULL;

	iommu->cmd_buf_size = CMD_BUFFER_SIZE;

	memset(cmd_buf, 0, CMD_BUFFER_SIZE);

	entry = (u64)virt_to_phys(cmd_buf);
	entry |= MMIO_CMD_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
			&entry, sizeof(entry));

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);

	return cmd_buf;
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	if (iommu->cmd_buf)
		free_pages((unsigned long)iommu->cmd_buf,
				get_order(CMD_BUFFER_SIZE));
}

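/*
 * Set a single bit in a device table entry. An entry is 256 bits wide,
 * stored as eight 32-bit words, so bits 5-7 of 'bit' select the word
 * and bits 0-4 the position inside it (bit 96, for example, lands in
 * word 3 at position 0).
 */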
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 5) & 0x07;
	int _bit = bit & 0x1f;

	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
}

static void __init set_dev_entry_from_acpi(u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
}

static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int bus = PCI_BUS(iommu->devid);
	int dev = PCI_SLOT(iommu->devid);
	int fn = PCI_FUNC(iommu->devid);
	int cap_ptr = iommu->cap_ptr;
	u32 range;

	iommu->cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_CAP_HDR_OFFSET);

	range = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	iommu->first_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_FD(range));
	iommu->last_device = DEVID(MMIO_GET_BUS(range), MMIO_GET_LD(range));
}

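/*
 * Take over one IOMMU from its IVHD entry: first apply the recommended
 * feature bits from the header flags, then walk the variable-length
 * device entries and record the per-device settings (selected devices,
 * device ranges, and alias mappings) in the shared tables.
 */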
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
	u32 ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First set the recommended feature enable bits from ACPI
	 * into the IOMMU control registers
	 */
	h->flags & IVHD_FLAG_HT_TUN_EN ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	h->flags & IVHD_FLAG_PASSPW_EN ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	h->flags & IVHD_FLAG_RESPASSPW_EN ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	h->flags & IVHD_FLAG_ISOC_EN ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:
			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:
			devid = e->devid;
			set_dev_entry_from_acpi(devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:
			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(devid, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:
			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:
			devid = e->devid;
			set_dev_entry_from_acpi(devid, e->flags, e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:
			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias)
					amd_iommu_alias_table[dev_i] = devid_to;
				set_dev_entry_from_acpi(
						amd_iommu_alias_table[dev_i],
						flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += 0x04 << (e->type >> 6);
	}
}

static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u16 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

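/*
 * Bring up a single IOMMU: map its MMIO space, point it at the shared
 * device table, allocate its command buffer, and fill in its state from
 * PCI config space and the ACPI entry.
 */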
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);
	list_add_tail(&iommu->list, &amd_iommu_list);

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu_set_device_table(iommu);
	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	return 0;
}

static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	INIT_LIST_HEAD(&amd_iommu_list);

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;
			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

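/*
 * Translate an IVMD entry into a unity_map_entry and queue it on the
 * amd_iommu_unity_map list, where the mapping code picks it up later.
 */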
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
	case ACPI_IVMD_TYPE:
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	INIT_LIST_HEAD(&amd_iommu_unity_map);

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}

static void __init enable_iommus(void)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list) {
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
	}
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static int amd_iommu_resume(struct sys_device *dev)
{
	return 0;
}

static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
{
	return -EINVAL;
}

static struct sysdev_class amd_iommu_sysdev_class = {
	.name = "amd_iommu",
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

static struct sys_device device_amd_iommu = {
	.id = 0,
	.cls = &amd_iommu_sysdev_class,
};

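/*
 * This is the main initialization routine. The rough sequence is:
 *   1. scan the IVRS table once to find the largest device id
 *   2. size and allocate the shared tables (device, alias, rlookup,
 *      protection domain, domain bitmap)
 *   3. scan the IVRS table again to set up each IOMMU and the memory
 *      definitions (exclusion and unity ranges)
 *   4. register the dma_ops and the sysdev, then enable the IOMMUs
 */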
int __init amd_iommu_init(void)
{
	int i, ret = 0;

	if (no_iommu) {
		printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
		return 0;
	}

	if (!amd_iommu_detected)
		return -ENODEV;

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
		return -ENODEV;

	dev_table_size = TBL_SIZE(DEV_TABLE_ENTRY_SIZE);
	alias_table_size = TBL_SIZE(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = TBL_SIZE(RLOOKUP_TABLE_ENTRY_SIZE);

	ret = -ENOMEM;

	/* Device table - directly used by all IOMMUs */
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto free;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto free;

	/*
	 * Protection Domain table - maps devices to protection domains
	 * This table has the same size as the rlookup_table
	 */
	amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(rlookup_table_size));
	if (amd_iommu_pd_table == NULL)
		goto free;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(GFP_KERNEL,
			get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto free;

	/*
	 * memory is allocated now; initialize the device table with all zeroes
	 * and let all alias entries point to themselves
	 */
	memset(amd_iommu_dev_table, 0, dev_table_size);
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	memset(amd_iommu_pd_table, 0, rlookup_table_size);
	memset(amd_iommu_pd_alloc_bitmap, 0, MAX_DOMAIN_ID / 8);

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = -ENODEV;
	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
		goto free;

	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
		goto free;

	ret = amd_iommu_init_dma_ops();
	if (ret)
		goto free;

	ret = sysdev_class_register(&amd_iommu_sysdev_class);
	if (ret)
		goto free;

	ret = sysdev_register(&device_amd_iommu);
	if (ret)
		goto free;

	enable_iommus();

	printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
			(1 << (amd_iommu_aperture_order-20)));

	printk(KERN_INFO "AMD IOMMU: device isolation ");
	if (amd_iommu_isolate)
		printk("enabled\n");
	else
		printk("disabled\n");

out:
	return ret;

free:
	if (amd_iommu_pd_alloc_bitmap)
		free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
				get_order(MAX_DOMAIN_ID/8));

	if (amd_iommu_pd_table)
		free_pages((unsigned long)amd_iommu_pd_table,
				get_order(rlookup_table_size));

	if (amd_iommu_rlookup_table)
		free_pages((unsigned long)amd_iommu_rlookup_table,
				get_order(rlookup_table_size));

	if (amd_iommu_alias_table)
		free_pages((unsigned long)amd_iommu_alias_table,
				get_order(alias_table_size));

	if (amd_iommu_dev_table)
		free_pages((unsigned long)amd_iommu_dev_table,
				get_order(dev_table_size));

	free_iommu_all();

	free_unity_maps();

	goto out;
}


static int __init early_amd_iommu_detect(struct acpi_table_header *table)
{
	return 0;
}

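/*
 * Early detection hook: if an IVRS table is present, mark the AMD IOMMU
 * as detected and keep the GART aperture code from claiming the
 * hardware.
 */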
void __init amd_iommu_detect(void)
{
	if (swiotlb || no_iommu || iommu_detected)
		return;

	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
		iommu_detected = 1;
		amd_iommu_detected = 1;
#ifdef CONFIG_GART_IOMMU
		gart_iommu_aperture_disabled = 1;
		gart_iommu_aperture = 0;
#endif
	}
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strcmp(str, "isolate") == 0)
			amd_iommu_isolate = 1;
	}

	return 1;
}

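/*
 * Parse the amd_iommu_size= kernel command line option. The argument
 * selects the aperture size as a power of two; "64M", for example,
 * yields an order of 26 (1 << 26 bytes).
 */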
static int __init parse_amd_iommu_size_options(char *str)
{
	for (; *str; ++str) {
		if (strcmp(str, "32M") == 0)
			amd_iommu_aperture_order = 25;
		if (strcmp(str, "64M") == 0)
			amd_iommu_aperture_order = 26;
		if (strcmp(str, "128M") == 0)
			amd_iommu_aperture_order = 27;
		if (strcmp(str, "256M") == 0)
			amd_iommu_aperture_order = 28;
		if (strcmp(str, "512M") == 0)
			amd_iommu_aperture_order = 29;
		if (strcmp(str, "1G") == 0)
			amd_iommu_aperture_order = 30;
	}

	return 1;
}

__setup("amd_iommu=", parse_amd_iommu_options);
__setup("amd_iommu_size=", parse_amd_iommu_size_options);