path: root/arch/x86/kernel/amd_iommu_init.c
Diffstat (limited to 'arch/x86/kernel/amd_iommu_init.c')
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c  296
1 file changed, 203 insertions(+), 93 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 8c0be0902dac..c1b17e97252e 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -115,15 +115,21 @@ struct ivmd_header {
         u64 range_length;
 } __attribute__((packed));
 
+bool amd_iommu_dump;
+
 static int __initdata amd_iommu_detected;
 
 u16 amd_iommu_last_bdf;                 /* largest PCI device id we have
                                            to handle */
 LIST_HEAD(amd_iommu_unity_map);         /* a list of required unity mappings
                                            we find in ACPI */
-unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
+#ifdef CONFIG_IOMMU_STRESS
+bool amd_iommu_isolate = false;
+#else
 bool amd_iommu_isolate = true;          /* if true, device isolation is
                                            enabled */
+#endif
+
 bool amd_iommu_unmap_flush;             /* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);              /* list of all AMD IOMMUs in the
@@ -175,7 +181,7 @@ static inline void update_last_devid(u16 devid)
 static inline unsigned long tbl_size(int entry_size)
 {
         unsigned shift = PAGE_SHIFT +
-                get_order(amd_iommu_last_bdf * entry_size);
+                get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
 
         return 1UL << shift;
 }
@@ -193,7 +199,7 @@ static inline unsigned long tbl_size(int entry_size)
  * This function set the exclusion range in the IOMMU. DMA accesses to the
  * exclusion range are passed through untranslated
  */
-static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
+static void iommu_set_exclusion_range(struct amd_iommu *iommu)
 {
         u64 start = iommu->exclusion_start & PAGE_MASK;
         u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
@@ -225,7 +231,7 @@ static void __init iommu_set_device_table(struct amd_iommu *iommu)
 }
 
 /* Generic functions to enable/disable certain features of the IOMMU. */
-static void __init iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 {
         u32 ctrl;
 
@@ -244,7 +250,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 }
 
 /* Function to enable the hardware */
-static void __init iommu_enable(struct amd_iommu *iommu)
+static void iommu_enable(struct amd_iommu *iommu)
 {
         printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
                dev_name(&iommu->dev->dev), iommu->cap_ptr);
@@ -252,11 +258,17 @@ static void __init iommu_enable(struct amd_iommu *iommu)
         iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
 
-/* Function to enable IOMMU event logging and event interrupts */
-static void __init iommu_enable_event_logging(struct amd_iommu *iommu)
+static void iommu_disable(struct amd_iommu *iommu)
 {
-        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
-        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+        /* Disable command buffer */
+        iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+        /* Disable event logging and event interrupts */
+        iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+        iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+        /* Disable IOMMU hardware itself */
+        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 }
 
 /*
@@ -413,25 +425,36 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 {
         u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                         get_order(CMD_BUFFER_SIZE));
-        u64 entry;
 
         if (cmd_buf == NULL)
                 return NULL;
 
         iommu->cmd_buf_size = CMD_BUFFER_SIZE;
 
-        entry = (u64)virt_to_phys(cmd_buf);
+        return cmd_buf;
+}
+
+/*
+ * This function writes the command buffer address to the hardware and
+ * enables it.
+ */
+static void iommu_enable_command_buffer(struct amd_iommu *iommu)
+{
+        u64 entry;
+
+        BUG_ON(iommu->cmd_buf == NULL);
+
+        entry = (u64)virt_to_phys(iommu->cmd_buf);
         entry |= MMIO_CMD_SIZE_512;
+
         memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
                         &entry, sizeof(entry));
 
         /* set head and tail to zero manually */
         writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
         writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 
         iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
-
-        return cmd_buf;
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -443,20 +466,33 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
 /* allocates the memory where the IOMMU will log its events to */
 static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-        u64 entry;
         iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                         get_order(EVT_BUFFER_SIZE));
 
         if (iommu->evt_buf == NULL)
                 return NULL;
 
+        iommu->evt_buf_size = EVT_BUFFER_SIZE;
+
+        return iommu->evt_buf;
+}
+
+static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+{
+        u64 entry;
+
+        BUG_ON(iommu->evt_buf == NULL);
+
         entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+
         memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                     &entry, sizeof(entry));
 
-        iommu->evt_buf_size = EVT_BUFFER_SIZE;
+        /* set head and tail to zero manually */
+        writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+        writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 
-        return iommu->evt_buf;
+        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 }
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
@@ -596,32 +632,84 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
         p += sizeof(struct ivhd_header);
         end += h->length;
 
+
         while (p < end) {
                 e = (struct ivhd_entry *)p;
                 switch (e->type) {
                 case IVHD_DEV_ALL:
+
+                        DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
+                                    " last device %02x:%02x.%x flags: %02x\n",
+                                    PCI_BUS(iommu->first_device),
+                                    PCI_SLOT(iommu->first_device),
+                                    PCI_FUNC(iommu->first_device),
+                                    PCI_BUS(iommu->last_device),
+                                    PCI_SLOT(iommu->last_device),
+                                    PCI_FUNC(iommu->last_device),
+                                    e->flags);
+
                         for (dev_i = iommu->first_device;
                                         dev_i <= iommu->last_device; ++dev_i)
                                 set_dev_entry_from_acpi(iommu, dev_i,
                                                         e->flags, 0);
                         break;
                 case IVHD_DEV_SELECT:
+
+                        DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+                                    "flags: %02x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid),
+                                    e->flags);
+
                         devid = e->devid;
                         set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
                         break;
                 case IVHD_DEV_SELECT_RANGE_START:
+
+                        DUMP_printk(" DEV_SELECT_RANGE_START\t "
+                                    "devid: %02x:%02x.%x flags: %02x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid),
+                                    e->flags);
+
                         devid_start = e->devid;
                         flags = e->flags;
                         ext_flags = 0;
                         alias = false;
                         break;
                 case IVHD_DEV_ALIAS:
+
+                        DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+                                    "flags: %02x devid_to: %02x:%02x.%x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid),
+                                    e->flags,
+                                    PCI_BUS(e->ext >> 8),
+                                    PCI_SLOT(e->ext >> 8),
+                                    PCI_FUNC(e->ext >> 8));
+
                         devid = e->devid;
                         devid_to = e->ext >> 8;
-                        set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+                        set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
+                        set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
                         amd_iommu_alias_table[devid] = devid_to;
                         break;
                 case IVHD_DEV_ALIAS_RANGE:
+
+                        DUMP_printk(" DEV_ALIAS_RANGE\t\t "
+                                    "devid: %02x:%02x.%x flags: %02x "
+                                    "devid_to: %02x:%02x.%x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid),
+                                    e->flags,
+                                    PCI_BUS(e->ext >> 8),
+                                    PCI_SLOT(e->ext >> 8),
+                                    PCI_FUNC(e->ext >> 8));
+
                         devid_start = e->devid;
                         flags = e->flags;
                         devid_to = e->ext >> 8;
@@ -629,24 +717,48 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
                         alias = true;
                         break;
                 case IVHD_DEV_EXT_SELECT:
+
+                        DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+                                    "flags: %02x ext: %08x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid),
+                                    e->flags, e->ext);
+
                         devid = e->devid;
                         set_dev_entry_from_acpi(iommu, devid, e->flags,
                                                 e->ext);
                         break;
                 case IVHD_DEV_EXT_SELECT_RANGE:
+
+                        DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
+                                    "%02x:%02x.%x flags: %02x ext: %08x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid),
+                                    e->flags, e->ext);
+
                         devid_start = e->devid;
                         flags = e->flags;
                         ext_flags = e->ext;
                         alias = false;
                         break;
                 case IVHD_DEV_RANGE_END:
+
+                        DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
+                                    PCI_BUS(e->devid),
+                                    PCI_SLOT(e->devid),
+                                    PCI_FUNC(e->devid));
+
                         devid = e->devid;
                         for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
-                                if (alias)
+                                if (alias) {
                                         amd_iommu_alias_table[dev_i] = devid_to;
                                         set_dev_entry_from_acpi(iommu,
-                                                amd_iommu_alias_table[dev_i],
-                                                flags, ext_flags);
+                                                devid_to, flags, ext_flags);
+                                }
+                                set_dev_entry_from_acpi(iommu, dev_i,
+                                                        flags, ext_flags);
                         }
                         break;
                 default:
@@ -679,7 +791,7 @@ static void __init free_iommu_all(void)
 {
         struct amd_iommu *iommu, *next;
 
-        list_for_each_entry_safe(iommu, next, &amd_iommu_list, list) {
+        for_each_iommu_safe(iommu, next) {
                 list_del(&iommu->list);
                 free_iommu_one(iommu);
                 kfree(iommu);
@@ -710,7 +822,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
         if (!iommu->mmio_base)
                 return -ENOMEM;
 
-        iommu_set_device_table(iommu);
         iommu->cmd_buf = alloc_command_buffer(iommu);
         if (!iommu->cmd_buf)
                 return -ENOMEM;
@@ -746,6 +857,15 @@ static int __init init_iommu_all(struct acpi_table_header *table)
                 h = (struct ivhd_header *)p;
                 switch (*p) {
                 case ACPI_IVHD_TYPE:
+
+                        DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x "
+                                    "seg: %d flags: %01x info %04x\n",
+                                    PCI_BUS(h->devid), PCI_SLOT(h->devid),
+                                    PCI_FUNC(h->devid), h->cap_ptr,
+                                    h->pci_seg, h->flags, h->info);
+                        DUMP_printk(" mmio-addr: %016llx\n",
+                                    h->mmio_phys);
+
                         iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
                         if (iommu == NULL)
                                 return -ENOMEM;
@@ -773,56 +893,9 @@ static int __init init_iommu_all(struct acpi_table_header *table)
  *
  ****************************************************************************/
 
-static int __init iommu_setup_msix(struct amd_iommu *iommu)
-{
-        struct amd_iommu *curr;
-        struct msix_entry entries[32]; /* only 32 supported by AMD IOMMU */
-        int nvec = 0, i;
-
-        list_for_each_entry(curr, &amd_iommu_list, list) {
-                if (curr->dev == iommu->dev) {
-                        entries[nvec].entry = curr->evt_msi_num;
-                        entries[nvec].vector = 0;
-                        curr->int_enabled = true;
-                        nvec++;
-                }
-        }
-
-        if (pci_enable_msix(iommu->dev, entries, nvec)) {
-                pci_disable_msix(iommu->dev);
-                return 1;
-        }
-
-        for (i = 0; i < nvec; ++i) {
-                int r = request_irq(entries->vector, amd_iommu_int_handler,
-                                    IRQF_SAMPLE_RANDOM,
-                                    "AMD IOMMU",
-                                    NULL);
-                if (r)
-                        goto out_free;
-        }
-
-        return 0;
-
-out_free:
-        for (i -= 1; i >= 0; --i)
-                free_irq(entries->vector, NULL);
-
-        pci_disable_msix(iommu->dev);
-
-        return 1;
-}
-
 static int __init iommu_setup_msi(struct amd_iommu *iommu)
 {
         int r;
-        struct amd_iommu *curr;
-
-        list_for_each_entry(curr, &amd_iommu_list, list) {
-                if (curr->dev == iommu->dev)
-                        curr->int_enabled = true;
-        }
-
 
         if (pci_enable_msi(iommu->dev))
                 return 1;
@@ -837,17 +910,18 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
                 return 1;
         }
 
+        iommu->int_enabled = true;
+        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
         return 0;
 }
 
-static int __init iommu_init_msi(struct amd_iommu *iommu)
+static int iommu_init_msi(struct amd_iommu *iommu)
 {
         if (iommu->int_enabled)
                 return 0;
 
-        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSIX))
-                return iommu_setup_msix(iommu);
-        else if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+        if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
                 return iommu_setup_msi(iommu);
 
         return 1;
@@ -899,6 +973,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
 static int __init init_unity_map_range(struct ivmd_header *m)
 {
         struct unity_map_entry *e = 0;
+        char *s;
 
         e = kzalloc(sizeof(*e), GFP_KERNEL);
         if (e == NULL)
@@ -906,14 +981,19 @@ static int __init init_unity_map_range(struct ivmd_header *m)
 
         switch (m->type) {
         default:
+                kfree(e);
+                return 0;
         case ACPI_IVMD_TYPE:
+                s = "IVMD_TYPEi\t\t\t";
                 e->devid_start = e->devid_end = m->devid;
                 break;
         case ACPI_IVMD_TYPE_ALL:
+                s = "IVMD_TYPE_ALL\t\t";
                 e->devid_start = 0;
                 e->devid_end = amd_iommu_last_bdf;
                 break;
         case ACPI_IVMD_TYPE_RANGE:
+                s = "IVMD_TYPE_RANGE\t\t";
                 e->devid_start = m->devid;
                 e->devid_end = m->aux;
                 break;
@@ -922,6 +1002,13 @@ static int __init init_unity_map_range(struct ivmd_header *m)
         e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
         e->prot = m->flags >> 1;
 
+        DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
+                    " range_start: %016llx range_end: %016llx flags: %x\n", s,
+                    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
+                    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
+                    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
+                    e->address_start, e->address_end, m->flags);
+
         list_add_tail(&e->list, &amd_iommu_unity_map);
 
         return 0;
@@ -967,18 +1054,29 @@ static void init_device_table(void)
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
  */
-static void __init enable_iommus(void)
+static void enable_iommus(void)
 {
         struct amd_iommu *iommu;
 
-        list_for_each_entry(iommu, &amd_iommu_list, list) {
+        for_each_iommu(iommu) {
+                iommu_disable(iommu);
+                iommu_set_device_table(iommu);
+                iommu_enable_command_buffer(iommu);
+                iommu_enable_event_buffer(iommu);
                 iommu_set_exclusion_range(iommu);
                 iommu_init_msi(iommu);
-                iommu_enable_event_logging(iommu);
                 iommu_enable(iommu);
         }
 }
 
+static void disable_iommus(void)
+{
+        struct amd_iommu *iommu;
+
+        for_each_iommu(iommu)
+                iommu_disable(iommu);
+}
+
 /*
  * Suspend/Resume support
  * disable suspend until real resume implemented
@@ -986,12 +1084,25 @@ static void __init enable_iommus(void)
 
 static int amd_iommu_resume(struct sys_device *dev)
 {
+        /* re-load the hardware */
+        enable_iommus();
+
+        /*
+         * we have to flush after the IOMMUs are enabled because a
+         * disabled IOMMU will never execute the commands we send
+         */
+        amd_iommu_flush_all_devices();
+        amd_iommu_flush_all_domains();
+
         return 0;
 }
 
 static int amd_iommu_suspend(struct sys_device *dev, pm_message_t state)
 {
-        return -EINVAL;
+        /* disable IOMMUs to go out of the way for BIOS */
+        disable_iommus();
+
+        return 0;
 }
 
 static struct sysdev_class amd_iommu_sysdev_class = {
@@ -1137,9 +1248,6 @@ int __init amd_iommu_init(void)
 
         enable_iommus();
 
-        printk(KERN_INFO "AMD IOMMU: aperture size is %d MB\n",
-               (1 << (amd_iommu_aperture_order-20)));
-
         printk(KERN_INFO "AMD IOMMU: device isolation ");
         if (amd_iommu_isolate)
                 printk("enabled\n");
@@ -1177,6 +1285,11 @@ free:
         goto out;
 }
 
+void amd_iommu_shutdown(void)
+{
+        disable_iommus();
+}
+
 /****************************************************************************
  *
  * Early detect code. This code runs at IOMMU detection time in the DMA
@@ -1211,6 +1324,13 @@ void __init amd_iommu_detect(void)
  *
  ****************************************************************************/
 
+static int __init parse_amd_iommu_dump(char *str)
+{
+        amd_iommu_dump = true;
+
+        return 1;
+}
+
 static int __init parse_amd_iommu_options(char *str)
 {
         for (; *str; ++str) {
@@ -1225,15 +1345,5 @@ static int __init parse_amd_iommu_options(char *str)
         return 1;
 }
 
-static int __init parse_amd_iommu_size_options(char *str)
-{
-        unsigned order = PAGE_SHIFT + get_order(memparse(str, &str));
-
-        if ((order > 24) && (order < 31))
-                amd_iommu_aperture_order = order;
-
-        return 1;
-}
-
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
 __setup("amd_iommu=", parse_amd_iommu_options);
-__setup("amd_iommu_size=", parse_amd_iommu_size_options);