author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-07 17:33:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-07 17:33:44 -0400
commit     c0703c12ef6744b6d2565ec67a15aaf25d534abd
tree       aff6bcf3874dba969805b057f0ceb3ea0b0b2112 /drivers
parent     0e51793e162ca432fc5f04178cf82b80a92c2659
parent     009487258399cb4f431992919fa0f386d1b74ceb
Merge tag 'iommu-updates-v3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "This time the IOMMU updates contain a bunch of fixes and cleanups to
  various IOMMU drivers and the DMA debug code.  New features are the
  code for IRQ remapping support with the AMD IOMMU (preparation for
  that was already merged in the last release) and a debugfs interface
  to export some statistics in the NVidia Tegra IOMMU driver."

* tag 'iommu-updates-v3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (27 commits)
  iommu/amd: Remove obsolete comment line
  dma-debug: Remove local BUS_NOTIFY_UNBOUND_DRIVER define
  iommu/amd: Fix possible use after free in get_irq_table()
  iommu/amd: Report irq remapping through IOMMU-API
  iommu/amd: Print message to system log when irq remapping is enabled
  iommu/irq: Use amd_iommu_irq_ops if supported
  iommu/amd: Make sure irq remapping still works on dma init failure
  iommu/amd: Add initialization routines for AMD interrupt remapping
  iommu/amd: Add call-back routine for HPET MSI
  iommu/amd: Implement MSI routines for interrupt remapping
  iommu/amd: Add IOAPIC remapping routines
  iommu/amd: Add routines to manage irq remapping tables
  iommu/amd: Add IRTE invalidation routine
  iommu/amd: Make sure IOMMU is not considered to translate itself
  iommu/amd: Split device table initialization into irq and dma part
  iommu/amd: Check if IOAPIC information is correct
  iommu/amd: Allocate data structures to keep track of irq remapping tables
  iommu/amd: Add slab-cache for irq remapping tables
  iommu/amd: Keep track of HPET and IOAPIC device ids
  iommu/amd: Fix features reporting
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/iommu/Kconfig            |    2
-rw-r--r--  drivers/iommu/amd_iommu.c        |  514
-rw-r--r--  drivers/iommu/amd_iommu_init.c   |  253
-rw-r--r--  drivers/iommu/amd_iommu_proto.h  |    8
-rw-r--r--  drivers/iommu/amd_iommu_types.h  |   59
-rw-r--r--  drivers/iommu/exynos-iommu.c     |    3
-rw-r--r--  drivers/iommu/intel-iommu.c      |    4
-rw-r--r--  drivers/iommu/irq_remapping.c    |    5
-rw-r--r--  drivers/iommu/irq_remapping.h    |    6
-rw-r--r--  drivers/iommu/tegra-smmu.c       |  261
10 files changed, 1071 insertions, 44 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 9f69b561f5d..e39f9dbf297 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -42,7 +42,7 @@ config AMD_IOMMU
42 select PCI_PRI 42 select PCI_PRI
43 select PCI_PASID 43 select PCI_PASID
44 select IOMMU_API 44 select IOMMU_API
45 depends on X86_64 && PCI && ACPI 45 depends on X86_64 && PCI && ACPI && X86_IO_APIC
46 ---help--- 46 ---help---
47 With this option you can enable support for AMD IOMMU hardware in 47 With this option you can enable support for AMD IOMMU hardware in
48 your system. An IOMMU is a hardware component which provides 48 your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e89daf1b21b..55074cba20e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -31,6 +31,12 @@
31#include <linux/amd-iommu.h> 31#include <linux/amd-iommu.h>
32#include <linux/notifier.h> 32#include <linux/notifier.h>
33#include <linux/export.h> 33#include <linux/export.h>
34#include <linux/irq.h>
35#include <linux/msi.h>
36#include <asm/irq_remapping.h>
37#include <asm/io_apic.h>
38#include <asm/apic.h>
39#include <asm/hw_irq.h>
34#include <asm/msidef.h> 40#include <asm/msidef.h>
35#include <asm/proto.h> 41#include <asm/proto.h>
36#include <asm/iommu.h> 42#include <asm/iommu.h>
@@ -39,6 +45,7 @@
39 45
40#include "amd_iommu_proto.h" 46#include "amd_iommu_proto.h"
41#include "amd_iommu_types.h" 47#include "amd_iommu_types.h"
48#include "irq_remapping.h"
42 49
43#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) 50#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
44 51
@@ -72,6 +79,9 @@ static DEFINE_SPINLOCK(iommu_pd_list_lock);
72static LIST_HEAD(dev_data_list); 79static LIST_HEAD(dev_data_list);
73static DEFINE_SPINLOCK(dev_data_list_lock); 80static DEFINE_SPINLOCK(dev_data_list_lock);
74 81
82LIST_HEAD(ioapic_map);
83LIST_HEAD(hpet_map);
84
75/* 85/*
76 * Domain for untranslated devices - only allocated 86 * Domain for untranslated devices - only allocated
77 * if iommu=pt passed on kernel cmd line. 87 * if iommu=pt passed on kernel cmd line.
@@ -92,6 +102,8 @@ struct iommu_cmd {
92 u32 data[4]; 102 u32 data[4];
93}; 103};
94 104
105struct kmem_cache *amd_iommu_irq_cache;
106
95static void update_domain(struct protection_domain *domain); 107static void update_domain(struct protection_domain *domain);
96static int __init alloc_passthrough_domain(void); 108static int __init alloc_passthrough_domain(void);
97 109
@@ -686,7 +698,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
686 698
687 /* 699 /*
688 * Release iommu->lock because ppr-handling might need to 700 * Release iommu->lock because ppr-handling might need to
689 * re-aquire it 701 * re-acquire it
690 */ 702 */
691 spin_unlock_irqrestore(&iommu->lock, flags); 703 spin_unlock_irqrestore(&iommu->lock, flags);
692 704
@@ -804,7 +816,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
804 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); 816 CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
805 if (s) /* size bit - we flush more than one 4kb page */ 817 if (s) /* size bit - we flush more than one 4kb page */
806 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; 818 cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
807 if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ 819 if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
808 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; 820 cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
809} 821}
810 822
@@ -899,6 +911,13 @@ static void build_inv_all(struct iommu_cmd *cmd)
899 CMD_SET_TYPE(cmd, CMD_INV_ALL); 911 CMD_SET_TYPE(cmd, CMD_INV_ALL);
900} 912}
901 913
914static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
915{
916 memset(cmd, 0, sizeof(*cmd));
917 cmd->data[0] = devid;
918 CMD_SET_TYPE(cmd, CMD_INV_IRT);
919}
920
902/* 921/*
903 * Writes the command to the IOMMUs command buffer and informs the 922 * Writes the command to the IOMMUs command buffer and informs the
904 * hardware about the new command. 923 * hardware about the new command.
@@ -1020,12 +1039,32 @@ static void iommu_flush_all(struct amd_iommu *iommu)
1020 iommu_completion_wait(iommu); 1039 iommu_completion_wait(iommu);
1021} 1040}
1022 1041
1042static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
1043{
1044 struct iommu_cmd cmd;
1045
1046 build_inv_irt(&cmd, devid);
1047
1048 iommu_queue_command(iommu, &cmd);
1049}
1050
1051static void iommu_flush_irt_all(struct amd_iommu *iommu)
1052{
1053 u32 devid;
1054
1055 for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
1056 iommu_flush_irt(iommu, devid);
1057
1058 iommu_completion_wait(iommu);
1059}
1060
1023void iommu_flush_all_caches(struct amd_iommu *iommu) 1061void iommu_flush_all_caches(struct amd_iommu *iommu)
1024{ 1062{
1025 if (iommu_feature(iommu, FEATURE_IA)) { 1063 if (iommu_feature(iommu, FEATURE_IA)) {
1026 iommu_flush_all(iommu); 1064 iommu_flush_all(iommu);
1027 } else { 1065 } else {
1028 iommu_flush_dte_all(iommu); 1066 iommu_flush_dte_all(iommu);
1067 iommu_flush_irt_all(iommu);
1029 iommu_flush_tlb_all(iommu); 1068 iommu_flush_tlb_all(iommu);
1030 } 1069 }
1031} 1070}
@@ -2155,7 +2194,7 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
2155} 2194}
2156 2195
2157/* 2196/*
2158 * If a device is not yet associated with a domain, this function does 2197 * If a device is not yet associated with a domain, this function
2159 * assigns it visible for the hardware 2198 * assigns it visible for the hardware
2160 */ 2199 */
2161static int attach_device(struct device *dev, 2200static int attach_device(struct device *dev,
@@ -2405,7 +2444,7 @@ static struct protection_domain *get_domain(struct device *dev)
2405 if (domain != NULL) 2444 if (domain != NULL)
2406 return domain; 2445 return domain;
2407 2446
2408 /* Device not bount yet - bind it */ 2447 /* Device not bound yet - bind it */
2409 dma_dom = find_protection_domain(devid); 2448 dma_dom = find_protection_domain(devid);
2410 if (!dma_dom) 2449 if (!dma_dom)
2411 dma_dom = amd_iommu_rlookup_table[devid]->default_dom; 2450 dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
@@ -2944,7 +2983,7 @@ static void __init prealloc_protection_domains(void)
2944 alloc_passthrough_domain(); 2983 alloc_passthrough_domain();
2945 dev_data->passthrough = true; 2984 dev_data->passthrough = true;
2946 attach_device(&dev->dev, pt_domain); 2985 attach_device(&dev->dev, pt_domain);
2947 pr_info("AMD-Vi: Using passthough domain for device %s\n", 2986 pr_info("AMD-Vi: Using passthrough domain for device %s\n",
2948 dev_name(&dev->dev)); 2987 dev_name(&dev->dev));
2949 } 2988 }
2950 2989
@@ -3316,6 +3355,8 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
3316 switch (cap) { 3355 switch (cap) {
3317 case IOMMU_CAP_CACHE_COHERENCY: 3356 case IOMMU_CAP_CACHE_COHERENCY:
3318 return 1; 3357 return 1;
3358 case IOMMU_CAP_INTR_REMAP:
3359 return irq_remapping_enabled;
3319 } 3360 }
3320 3361
3321 return 0; 3362 return 0;
@@ -3743,3 +3784,466 @@ int amd_iommu_device_info(struct pci_dev *pdev,
3743 return 0; 3784 return 0;
3744} 3785}
3745EXPORT_SYMBOL(amd_iommu_device_info); 3786EXPORT_SYMBOL(amd_iommu_device_info);
3787
3788#ifdef CONFIG_IRQ_REMAP
3789
3790/*****************************************************************************
3791 *
3792 * Interrupt Remapping Implementation
3793 *
3794 *****************************************************************************/
3795
3796union irte {
3797 u32 val;
3798 struct {
3799 u32 valid : 1,
3800 no_fault : 1,
3801 int_type : 3,
3802 rq_eoi : 1,
3803 dm : 1,
3804 rsvd_1 : 1,
3805 destination : 8,
3806 vector : 8,
3807 rsvd_2 : 8;
3808 } fields;
3809};
3810
3811#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
3812#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
3813#define DTE_IRQ_TABLE_LEN (8ULL << 1)
3814#define DTE_IRQ_REMAP_ENABLE 1ULL
3815
3816static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3817{
3818 u64 dte;
3819
3820 dte = amd_iommu_dev_table[devid].data[2];
3821 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
3822 dte |= virt_to_phys(table->table);
3823 dte |= DTE_IRQ_REMAP_INTCTL;
3824 dte |= DTE_IRQ_TABLE_LEN;
3825 dte |= DTE_IRQ_REMAP_ENABLE;
3826
3827 amd_iommu_dev_table[devid].data[2] = dte;
3828}
3829
3830#define IRTE_ALLOCATED (~1U)
3831
3832static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3833{
3834 struct irq_remap_table *table = NULL;
3835 struct amd_iommu *iommu;
3836 unsigned long flags;
3837 u16 alias;
3838
3839 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3840
3841 iommu = amd_iommu_rlookup_table[devid];
3842 if (!iommu)
3843 goto out_unlock;
3844
3845 table = irq_lookup_table[devid];
3846 if (table)
3847 goto out;
3848
3849 alias = amd_iommu_alias_table[devid];
3850 table = irq_lookup_table[alias];
3851 if (table) {
3852 irq_lookup_table[devid] = table;
3853 set_dte_irq_entry(devid, table);
3854 iommu_flush_dte(iommu, devid);
3855 goto out;
3856 }
3857
3858 /* Nothing there yet, allocate new irq remapping table */
3859 table = kzalloc(sizeof(*table), GFP_ATOMIC);
3860 if (!table)
3861 goto out;
3862
3863 if (ioapic)
3864 /* Keep the first 32 indexes free for IOAPIC interrupts */
3865 table->min_index = 32;
3866
3867 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3868 if (!table->table) {
3869 kfree(table);
3870 table = NULL;
3871 goto out;
3872 }
3873
3874 memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
3875
3876 if (ioapic) {
3877 int i;
3878
3879 for (i = 0; i < 32; ++i)
3880 table->table[i] = IRTE_ALLOCATED;
3881 }
3882
3883 irq_lookup_table[devid] = table;
3884 set_dte_irq_entry(devid, table);
3885 iommu_flush_dte(iommu, devid);
3886 if (devid != alias) {
3887 irq_lookup_table[alias] = table;
3888 set_dte_irq_entry(devid, table);
3889 iommu_flush_dte(iommu, alias);
3890 }
3891
3892out:
3893 iommu_completion_wait(iommu);
3894
3895out_unlock:
3896 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3897
3898 return table;
3899}
3900
3901static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
3902{
3903 struct irq_remap_table *table;
3904 unsigned long flags;
3905 int index, c;
3906
3907 table = get_irq_table(devid, false);
3908 if (!table)
3909 return -ENODEV;
3910
3911 spin_lock_irqsave(&table->lock, flags);
3912
3913 /* Scan table for free entries */
3914 for (c = 0, index = table->min_index;
3915 index < MAX_IRQS_PER_TABLE;
3916 ++index) {
3917 if (table->table[index] == 0)
3918 c += 1;
3919 else
3920 c = 0;
3921
3922 if (c == count) {
3923 struct irq_2_iommu *irte_info;
3924
3925 for (; c != 0; --c)
3926 table->table[index - c + 1] = IRTE_ALLOCATED;
3927
3928 index -= count - 1;
3929
3930 irte_info = &cfg->irq_2_iommu;
3931 irte_info->sub_handle = devid;
3932 irte_info->irte_index = index;
3933 irte_info->iommu = (void *)cfg;
3934
3935 goto out;
3936 }
3937 }
3938
3939 index = -ENOSPC;
3940
3941out:
3942 spin_unlock_irqrestore(&table->lock, flags);
3943
3944 return index;
3945}
3946
3947static int get_irte(u16 devid, int index, union irte *irte)
3948{
3949 struct irq_remap_table *table;
3950 unsigned long flags;
3951
3952 table = get_irq_table(devid, false);
3953 if (!table)
3954 return -ENOMEM;
3955
3956 spin_lock_irqsave(&table->lock, flags);
3957 irte->val = table->table[index];
3958 spin_unlock_irqrestore(&table->lock, flags);
3959
3960 return 0;
3961}
3962
3963static int modify_irte(u16 devid, int index, union irte irte)
3964{
3965 struct irq_remap_table *table;
3966 struct amd_iommu *iommu;
3967 unsigned long flags;
3968
3969 iommu = amd_iommu_rlookup_table[devid];
3970 if (iommu == NULL)
3971 return -EINVAL;
3972
3973 table = get_irq_table(devid, false);
3974 if (!table)
3975 return -ENOMEM;
3976
3977 spin_lock_irqsave(&table->lock, flags);
3978 table->table[index] = irte.val;
3979 spin_unlock_irqrestore(&table->lock, flags);
3980
3981 iommu_flush_irt(iommu, devid);
3982 iommu_completion_wait(iommu);
3983
3984 return 0;
3985}
3986
3987static void free_irte(u16 devid, int index)
3988{
3989 struct irq_remap_table *table;
3990 struct amd_iommu *iommu;
3991 unsigned long flags;
3992
3993 iommu = amd_iommu_rlookup_table[devid];
3994 if (iommu == NULL)
3995 return;
3996
3997 table = get_irq_table(devid, false);
3998 if (!table)
3999 return;
4000
4001 spin_lock_irqsave(&table->lock, flags);
4002 table->table[index] = 0;
4003 spin_unlock_irqrestore(&table->lock, flags);
4004
4005 iommu_flush_irt(iommu, devid);
4006 iommu_completion_wait(iommu);
4007}
4008
4009static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
4010 unsigned int destination, int vector,
4011 struct io_apic_irq_attr *attr)
4012{
4013 struct irq_remap_table *table;
4014 struct irq_2_iommu *irte_info;
4015 struct irq_cfg *cfg;
4016 union irte irte;
4017 int ioapic_id;
4018 int index;
4019 int devid;
4020 int ret;
4021
4022 cfg = irq_get_chip_data(irq);
4023 if (!cfg)
4024 return -EINVAL;
4025
4026 irte_info = &cfg->irq_2_iommu;
4027 ioapic_id = mpc_ioapic_id(attr->ioapic);
4028 devid = get_ioapic_devid(ioapic_id);
4029
4030 if (devid < 0)
4031 return devid;
4032
4033 table = get_irq_table(devid, true);
4034 if (table == NULL)
4035 return -ENOMEM;
4036
4037 index = attr->ioapic_pin;
4038
4039 /* Setup IRQ remapping info */
4040 irte_info->sub_handle = devid;
4041 irte_info->irte_index = index;
4042 irte_info->iommu = (void *)cfg;
4043
4044 /* Setup IRTE for IOMMU */
4045 irte.val = 0;
4046 irte.fields.vector = vector;
4047 irte.fields.int_type = apic->irq_delivery_mode;
4048 irte.fields.destination = destination;
4049 irte.fields.dm = apic->irq_dest_mode;
4050 irte.fields.valid = 1;
4051
4052 ret = modify_irte(devid, index, irte);
4053 if (ret)
4054 return ret;
4055
4056 /* Setup IOAPIC entry */
4057 memset(entry, 0, sizeof(*entry));
4058
4059 entry->vector = index;
4060 entry->mask = 0;
4061 entry->trigger = attr->trigger;
4062 entry->polarity = attr->polarity;
4063
4064 /*
4065 * Mask level triggered irqs.
4066 */
4067 if (attr->trigger)
4068 entry->mask = 1;
4069
4070 return 0;
4071}
4072
4073static int set_affinity(struct irq_data *data, const struct cpumask *mask,
4074 bool force)
4075{
4076 struct irq_2_iommu *irte_info;
4077 unsigned int dest, irq;
4078 struct irq_cfg *cfg;
4079 union irte irte;
4080 int err;
4081
4082 if (!config_enabled(CONFIG_SMP))
4083 return -1;
4084
4085 cfg = data->chip_data;
4086 irq = data->irq;
4087 irte_info = &cfg->irq_2_iommu;
4088
4089 if (!cpumask_intersects(mask, cpu_online_mask))
4090 return -EINVAL;
4091
4092 if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
4093 return -EBUSY;
4094
4095 if (assign_irq_vector(irq, cfg, mask))
4096 return -EBUSY;
4097
4098 err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
4099 if (err) {
4100 if (assign_irq_vector(irq, cfg, data->affinity))
4101 pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
4102 return err;
4103 }
4104
4105 irte.fields.vector = cfg->vector;
4106 irte.fields.destination = dest;
4107
4108 modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
4109
4110 if (cfg->move_in_progress)
4111 send_cleanup_vector(cfg);
4112
4113 cpumask_copy(data->affinity, mask);
4114
4115 return 0;
4116}
4117
4118static int free_irq(int irq)
4119{
4120 struct irq_2_iommu *irte_info;
4121 struct irq_cfg *cfg;
4122
4123 cfg = irq_get_chip_data(irq);
4124 if (!cfg)
4125 return -EINVAL;
4126
4127 irte_info = &cfg->irq_2_iommu;
4128
4129 free_irte(irte_info->sub_handle, irte_info->irte_index);
4130
4131 return 0;
4132}
4133
4134static void compose_msi_msg(struct pci_dev *pdev,
4135 unsigned int irq, unsigned int dest,
4136 struct msi_msg *msg, u8 hpet_id)
4137{
4138 struct irq_2_iommu *irte_info;
4139 struct irq_cfg *cfg;
4140 union irte irte;
4141
4142 cfg = irq_get_chip_data(irq);
4143 if (!cfg)
4144 return;
4145
4146 irte_info = &cfg->irq_2_iommu;
4147
4148 irte.val = 0;
4149 irte.fields.vector = cfg->vector;
4150 irte.fields.int_type = apic->irq_delivery_mode;
4151 irte.fields.destination = dest;
4152 irte.fields.dm = apic->irq_dest_mode;
4153 irte.fields.valid = 1;
4154
4155 modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
4156
4157 msg->address_hi = MSI_ADDR_BASE_HI;
4158 msg->address_lo = MSI_ADDR_BASE_LO;
4159 msg->data = irte_info->irte_index;
4160}
4161
4162static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
4163{
4164 struct irq_cfg *cfg;
4165 int index;
4166 u16 devid;
4167
4168 if (!pdev)
4169 return -EINVAL;
4170
4171 cfg = irq_get_chip_data(irq);
4172 if (!cfg)
4173 return -EINVAL;
4174
4175 devid = get_device_id(&pdev->dev);
4176 index = alloc_irq_index(cfg, devid, nvec);
4177
4178 return index < 0 ? MAX_IRQS_PER_TABLE : index;
4179}
4180
4181static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
4182 int index, int offset)
4183{
4184 struct irq_2_iommu *irte_info;
4185 struct irq_cfg *cfg;
4186 u16 devid;
4187
4188 if (!pdev)
4189 return -EINVAL;
4190
4191 cfg = irq_get_chip_data(irq);
4192 if (!cfg)
4193 return -EINVAL;
4194
4195 if (index >= MAX_IRQS_PER_TABLE)
4196 return 0;
4197
4198 devid = get_device_id(&pdev->dev);
4199 irte_info = &cfg->irq_2_iommu;
4200
4201 irte_info->sub_handle = devid;
4202 irte_info->irte_index = index + offset;
4203 irte_info->iommu = (void *)cfg;
4204
4205 return 0;
4206}
4207
4208static int setup_hpet_msi(unsigned int irq, unsigned int id)
4209{
4210 struct irq_2_iommu *irte_info;
4211 struct irq_cfg *cfg;
4212 int index, devid;
4213
4214 cfg = irq_get_chip_data(irq);
4215 if (!cfg)
4216 return -EINVAL;
4217
4218 irte_info = &cfg->irq_2_iommu;
4219 devid = get_hpet_devid(id);
4220 if (devid < 0)
4221 return devid;
4222
4223 index = alloc_irq_index(cfg, devid, 1);
4224 if (index < 0)
4225 return index;
4226
4227 irte_info->sub_handle = devid;
4228 irte_info->irte_index = index;
4229 irte_info->iommu = (void *)cfg;
4230
4231 return 0;
4232}
4233
4234struct irq_remap_ops amd_iommu_irq_ops = {
4235 .supported = amd_iommu_supported,
4236 .prepare = amd_iommu_prepare,
4237 .enable = amd_iommu_enable,
4238 .disable = amd_iommu_disable,
4239 .reenable = amd_iommu_reenable,
4240 .enable_faulting = amd_iommu_enable_faulting,
4241 .setup_ioapic_entry = setup_ioapic_entry,
4242 .set_affinity = set_affinity,
4243 .free_irq = free_irq,
4244 .compose_msi_msg = compose_msi_msg,
4245 .msi_alloc_irq = msi_alloc_irq,
4246 .msi_setup_irq = msi_setup_irq,
4247 .setup_hpet_msi = setup_hpet_msi,
4248};
4249#endif
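
[Editor's note] For readers skimming the amd_iommu.c changes above: the new interrupt remapping support ultimately comes down to composing the 32-bit union irte defined in this patch and writing it into the per-device remapping table (modify_irte() then flushes it with the new CMD_INV_IRT command). The stand-alone sketch below only illustrates that encoding step; it is user-space C, the field values are invented, and C bit-field ordering is compiler-dependent, so it is not a byte-accurate model of the hardware entry.

#include <stdio.h>
#include <stdint.h>

/* Field layout copied from the union irte added to amd_iommu.c above. */
union irte {
	uint32_t val;
	struct {
		uint32_t valid       : 1,
			 no_fault    : 1,
			 int_type    : 3,
			 rq_eoi      : 1,
			 dm          : 1,
			 rsvd_1      : 1,
			 destination : 8,
			 vector      : 8,
			 rsvd_2      : 8;
	} fields;
};

int main(void)
{
	union irte irte = { .val = 0 };

	/* Same fields that setup_ioapic_entry()/compose_msi_msg() fill in;
	 * the numbers here are made up for illustration only. */
	irte.fields.vector      = 0x31;	/* CPU interrupt vector */
	irte.fields.destination = 0x02;	/* APIC destination id */
	irte.fields.int_type    = 0;	/* fixed delivery mode */
	irte.fields.dm          = 0;	/* physical destination mode */
	irte.fields.valid       = 1;

	printf("irte.val = 0x%08x\n", (unsigned int)irte.val);
	return 0;
}

In the patch itself these fields are derived from apic->irq_delivery_mode, apic->irq_dest_mode and the allocated vector before modify_irte() stores the entry at the index reserved in the device's table.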
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 18a89b760aa..18b0d99bd4d 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,16 +26,18 @@
26#include <linux/msi.h> 26#include <linux/msi.h>
27#include <linux/amd-iommu.h> 27#include <linux/amd-iommu.h>
28#include <linux/export.h> 28#include <linux/export.h>
29#include <linux/acpi.h>
30#include <acpi/acpi.h> 29#include <acpi/acpi.h>
31#include <asm/pci-direct.h> 30#include <asm/pci-direct.h>
32#include <asm/iommu.h> 31#include <asm/iommu.h>
33#include <asm/gart.h> 32#include <asm/gart.h>
34#include <asm/x86_init.h> 33#include <asm/x86_init.h>
35#include <asm/iommu_table.h> 34#include <asm/iommu_table.h>
35#include <asm/io_apic.h>
36#include <asm/irq_remapping.h>
36 37
37#include "amd_iommu_proto.h" 38#include "amd_iommu_proto.h"
38#include "amd_iommu_types.h" 39#include "amd_iommu_types.h"
40#include "irq_remapping.h"
39 41
40/* 42/*
41 * definitions for the ACPI scanning code 43 * definitions for the ACPI scanning code
@@ -55,6 +57,10 @@
55#define IVHD_DEV_ALIAS_RANGE 0x43 57#define IVHD_DEV_ALIAS_RANGE 0x43
56#define IVHD_DEV_EXT_SELECT 0x46 58#define IVHD_DEV_EXT_SELECT 0x46
57#define IVHD_DEV_EXT_SELECT_RANGE 0x47 59#define IVHD_DEV_EXT_SELECT_RANGE 0x47
60#define IVHD_DEV_SPECIAL 0x48
61
62#define IVHD_SPECIAL_IOAPIC 1
63#define IVHD_SPECIAL_HPET 2
58 64
59#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 65#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
60#define IVHD_FLAG_PASSPW_EN_MASK 0x02 66#define IVHD_FLAG_PASSPW_EN_MASK 0x02
@@ -123,6 +129,7 @@ struct ivmd_header {
123} __attribute__((packed)); 129} __attribute__((packed));
124 130
125bool amd_iommu_dump; 131bool amd_iommu_dump;
132bool amd_iommu_irq_remap __read_mostly;
126 133
127static bool amd_iommu_detected; 134static bool amd_iommu_detected;
128static bool __initdata amd_iommu_disabled; 135static bool __initdata amd_iommu_disabled;
@@ -178,7 +185,13 @@ u16 *amd_iommu_alias_table;
178struct amd_iommu **amd_iommu_rlookup_table; 185struct amd_iommu **amd_iommu_rlookup_table;
179 186
180/* 187/*
181 * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap 188 * This table is used to find the irq remapping table for a given device id
189 * quickly.
190 */
191struct irq_remap_table **irq_lookup_table;
192
193/*
194 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
182 * to know which ones are already in use. 195 * to know which ones are already in use.
183 */ 196 */
184unsigned long *amd_iommu_pd_alloc_bitmap; 197unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -478,7 +491,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
478 491
479/**************************************************************************** 492/****************************************************************************
480 * 493 *
481 * The following functions belong the the code path which parses the ACPI table 494 * The following functions belong to the code path which parses the ACPI table
482 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 495 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
483 * data structures, initialize the device/alias/rlookup table and also 496 * data structures, initialize the device/alias/rlookup table and also
484 * basically initialize the hardware. 497 * basically initialize the hardware.
@@ -690,8 +703,33 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
690 set_iommu_for_device(iommu, devid); 703 set_iommu_for_device(iommu, devid);
691} 704}
692 705
706static int add_special_device(u8 type, u8 id, u16 devid)
707{
708 struct devid_map *entry;
709 struct list_head *list;
710
711 if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
712 return -EINVAL;
713
714 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
715 if (!entry)
716 return -ENOMEM;
717
718 entry->id = id;
719 entry->devid = devid;
720
721 if (type == IVHD_SPECIAL_IOAPIC)
722 list = &ioapic_map;
723 else
724 list = &hpet_map;
725
726 list_add_tail(&entry->list, list);
727
728 return 0;
729}
730
693/* 731/*
694 * Reads the device exclusion range from ACPI and initialize IOMMU with 732 * Reads the device exclusion range from ACPI and initializes the IOMMU with
695 * it 733 * it
696 */ 734 */
697static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) 735static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
@@ -717,7 +755,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
717 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 755 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
718 * initializes the hardware and our data structures with it. 756 * initializes the hardware and our data structures with it.
719 */ 757 */
720static void __init init_iommu_from_acpi(struct amd_iommu *iommu, 758static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
721 struct ivhd_header *h) 759 struct ivhd_header *h)
722{ 760{
723 u8 *p = (u8 *)h; 761 u8 *p = (u8 *)h;
@@ -867,12 +905,43 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
867 flags, ext_flags); 905 flags, ext_flags);
868 } 906 }
869 break; 907 break;
908 case IVHD_DEV_SPECIAL: {
909 u8 handle, type;
910 const char *var;
911 u16 devid;
912 int ret;
913
914 handle = e->ext & 0xff;
915 devid = (e->ext >> 8) & 0xffff;
916 type = (e->ext >> 24) & 0xff;
917
918 if (type == IVHD_SPECIAL_IOAPIC)
919 var = "IOAPIC";
920 else if (type == IVHD_SPECIAL_HPET)
921 var = "HPET";
922 else
923 var = "UNKNOWN";
924
925 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
926 var, (int)handle,
927 PCI_BUS(devid),
928 PCI_SLOT(devid),
929 PCI_FUNC(devid));
930
931 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
932 ret = add_special_device(type, handle, devid);
933 if (ret)
934 return ret;
935 break;
936 }
870 default: 937 default:
871 break; 938 break;
872 } 939 }
873 940
874 p += ivhd_entry_length(p); 941 p += ivhd_entry_length(p);
875 } 942 }
943
944 return 0;
876} 945}
877 946
878/* Initializes the device->iommu mapping for the driver */ 947/* Initializes the device->iommu mapping for the driver */
@@ -912,6 +981,8 @@ static void __init free_iommu_all(void)
912 */ 981 */
913static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) 982static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
914{ 983{
984 int ret;
985
915 spin_lock_init(&iommu->lock); 986 spin_lock_init(&iommu->lock);
916 987
917 /* Add IOMMU to internal data structures */ 988 /* Add IOMMU to internal data structures */
@@ -947,7 +1018,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
947 1018
948 iommu->int_enabled = false; 1019 iommu->int_enabled = false;
949 1020
950 init_iommu_from_acpi(iommu, h); 1021 ret = init_iommu_from_acpi(iommu, h);
1022 if (ret)
1023 return ret;
1024
1025 /*
1026 * Make sure IOMMU is not considered to translate itself. The IVRS
1027 * table tells us so, but this is a lie!
1028 */
1029 amd_iommu_rlookup_table[iommu->devid] = NULL;
1030
951 init_iommu_devices(iommu); 1031 init_iommu_devices(iommu);
952 1032
953 return 0; 1033 return 0;
@@ -1115,9 +1195,11 @@ static void print_iommu_info(void)
1115 if (iommu_feature(iommu, (1ULL << i))) 1195 if (iommu_feature(iommu, (1ULL << i)))
1116 pr_cont(" %s", feat_str[i]); 1196 pr_cont(" %s", feat_str[i]);
1117 } 1197 }
1118 }
1119 pr_cont("\n"); 1198 pr_cont("\n");
1199 }
1120 } 1200 }
1201 if (irq_remapping_enabled)
1202 pr_info("AMD-Vi: Interrupt remapping enabled\n");
1121} 1203}
1122 1204
1123static int __init amd_iommu_init_pci(void) 1205static int __init amd_iommu_init_pci(void)
@@ -1141,7 +1223,7 @@ static int __init amd_iommu_init_pci(void)
1141/**************************************************************************** 1223/****************************************************************************
1142 * 1224 *
1143 * The following functions initialize the MSI interrupts for all IOMMUs 1225 * The following functions initialize the MSI interrupts for all IOMMUs
1144 * in the system. Its a bit challenging because there could be multiple 1226 * in the system. It's a bit challenging because there could be multiple
1145 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 1227 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
1146 * pci_dev. 1228 * pci_dev.
1147 * 1229 *
@@ -1199,7 +1281,7 @@ enable_faults:
1199 * 1281 *
1200 * The next functions belong to the third pass of parsing the ACPI 1282 * The next functions belong to the third pass of parsing the ACPI
1201 * table. In this last pass the memory mapping requirements are 1283 * table. In this last pass the memory mapping requirements are
1202 * gathered (like exclusion and unity mapping reanges). 1284 * gathered (like exclusion and unity mapping ranges).
1203 * 1285 *
1204 ****************************************************************************/ 1286 ****************************************************************************/
1205 1287
@@ -1308,7 +1390,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
1308 * Init the device table to not allow DMA access for devices and 1390 * Init the device table to not allow DMA access for devices and
1309 * suppress all page faults 1391 * suppress all page faults
1310 */ 1392 */
1311static void init_device_table(void) 1393static void init_device_table_dma(void)
1312{ 1394{
1313 u32 devid; 1395 u32 devid;
1314 1396
@@ -1318,6 +1400,27 @@ static void init_device_table(void)
1318 } 1400 }
1319} 1401}
1320 1402
1403static void __init uninit_device_table_dma(void)
1404{
1405 u32 devid;
1406
1407 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1408 amd_iommu_dev_table[devid].data[0] = 0ULL;
1409 amd_iommu_dev_table[devid].data[1] = 0ULL;
1410 }
1411}
1412
1413static void init_device_table(void)
1414{
1415 u32 devid;
1416
1417 if (!amd_iommu_irq_remap)
1418 return;
1419
1420 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
1421 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
1422}
1423
1321static void iommu_init_flags(struct amd_iommu *iommu) 1424static void iommu_init_flags(struct amd_iommu *iommu)
1322{ 1425{
1323 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 1426 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
@@ -1466,10 +1569,14 @@ static struct syscore_ops amd_iommu_syscore_ops = {
1466 1569
1467static void __init free_on_init_error(void) 1570static void __init free_on_init_error(void)
1468{ 1571{
1469 amd_iommu_uninit_devices(); 1572 free_pages((unsigned long)irq_lookup_table,
1573 get_order(rlookup_table_size));
1470 1574
1471 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1575 if (amd_iommu_irq_cache) {
1472 get_order(MAX_DOMAIN_ID/8)); 1576 kmem_cache_destroy(amd_iommu_irq_cache);
1577 amd_iommu_irq_cache = NULL;
1578
1579 }
1473 1580
1474 free_pages((unsigned long)amd_iommu_rlookup_table, 1581 free_pages((unsigned long)amd_iommu_rlookup_table,
1475 get_order(rlookup_table_size)); 1582 get_order(rlookup_table_size));
@@ -1482,8 +1589,6 @@ static void __init free_on_init_error(void)
1482 1589
1483 free_iommu_all(); 1590 free_iommu_all();
1484 1591
1485 free_unity_maps();
1486
1487#ifdef CONFIG_GART_IOMMU 1592#ifdef CONFIG_GART_IOMMU
1488 /* 1593 /*
1489 * We failed to initialize the AMD IOMMU - try fallback to GART 1594 * We failed to initialize the AMD IOMMU - try fallback to GART
@@ -1494,6 +1599,33 @@ static void __init free_on_init_error(void)
1494#endif 1599#endif
1495} 1600}
1496 1601
1602static bool __init check_ioapic_information(void)
1603{
1604 int idx;
1605
1606 for (idx = 0; idx < nr_ioapics; idx++) {
1607 int id = mpc_ioapic_id(idx);
1608
1609 if (get_ioapic_devid(id) < 0) {
1610 pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id);
1611 pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n");
1612 return false;
1613 }
1614 }
1615
1616 return true;
1617}
1618
1619static void __init free_dma_resources(void)
1620{
1621 amd_iommu_uninit_devices();
1622
1623 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
1624 get_order(MAX_DOMAIN_ID/8));
1625
1626 free_unity_maps();
1627}
1628
1497/* 1629/*
1498 * This is the hardware init function for AMD IOMMU in the system. 1630 * This is the hardware init function for AMD IOMMU in the system.
1499 * This function is called either from amd_iommu_init or from the interrupt 1631 * This function is called either from amd_iommu_init or from the interrupt
@@ -1580,9 +1712,6 @@ static int __init early_amd_iommu_init(void)
1580 if (amd_iommu_pd_alloc_bitmap == NULL) 1712 if (amd_iommu_pd_alloc_bitmap == NULL)
1581 goto out; 1713 goto out;
1582 1714
1583 /* init the device table */
1584 init_device_table();
1585
1586 /* 1715 /*
1587 * let all alias entries point to itself 1716 * let all alias entries point to itself
1588 */ 1717 */
@@ -1605,10 +1734,35 @@ static int __init early_amd_iommu_init(void)
1605 if (ret) 1734 if (ret)
1606 goto out; 1735 goto out;
1607 1736
1737 if (amd_iommu_irq_remap)
1738 amd_iommu_irq_remap = check_ioapic_information();
1739
1740 if (amd_iommu_irq_remap) {
1741 /*
1742 * Interrupt remapping enabled, create kmem_cache for the
1743 * remapping tables.
1744 */
1745 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
1746 MAX_IRQS_PER_TABLE * sizeof(u32),
1747 IRQ_TABLE_ALIGNMENT,
1748 0, NULL);
1749 if (!amd_iommu_irq_cache)
1750 goto out;
1751
1752 irq_lookup_table = (void *)__get_free_pages(
1753 GFP_KERNEL | __GFP_ZERO,
1754 get_order(rlookup_table_size));
1755 if (!irq_lookup_table)
1756 goto out;
1757 }
1758
1608 ret = init_memory_definitions(ivrs_base); 1759 ret = init_memory_definitions(ivrs_base);
1609 if (ret) 1760 if (ret)
1610 goto out; 1761 goto out;
1611 1762
1763 /* init the device table */
1764 init_device_table();
1765
1612out: 1766out:
1613 /* Don't leak any ACPI memory */ 1767 /* Don't leak any ACPI memory */
1614 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); 1768 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
@@ -1652,13 +1806,22 @@ static bool detect_ivrs(void)
1652 /* Make sure ACS will be enabled during PCI probe */ 1806 /* Make sure ACS will be enabled during PCI probe */
1653 pci_request_acs(); 1807 pci_request_acs();
1654 1808
1809 if (!disable_irq_remap)
1810 amd_iommu_irq_remap = true;
1811
1655 return true; 1812 return true;
1656} 1813}
1657 1814
1658static int amd_iommu_init_dma(void) 1815static int amd_iommu_init_dma(void)
1659{ 1816{
1817 struct amd_iommu *iommu;
1660 int ret; 1818 int ret;
1661 1819
1820 init_device_table_dma();
1821
1822 for_each_iommu(iommu)
1823 iommu_flush_all_caches(iommu);
1824
1662 if (iommu_pass_through) 1825 if (iommu_pass_through)
1663 ret = amd_iommu_init_passthrough(); 1826 ret = amd_iommu_init_passthrough();
1664 else 1827 else
@@ -1749,7 +1912,48 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
1749 return ret; 1912 return ret;
1750} 1913}
1751 1914
1915#ifdef CONFIG_IRQ_REMAP
1916int __init amd_iommu_prepare(void)
1917{
1918 return iommu_go_to_state(IOMMU_ACPI_FINISHED);
1919}
1920
1921int __init amd_iommu_supported(void)
1922{
1923 return amd_iommu_irq_remap ? 1 : 0;
1924}
1925
1926int __init amd_iommu_enable(void)
1927{
1928 int ret;
1929
1930 ret = iommu_go_to_state(IOMMU_ENABLED);
1931 if (ret)
1932 return ret;
1933
1934 irq_remapping_enabled = 1;
1935
1936 return 0;
1937}
1938
1939void amd_iommu_disable(void)
1940{
1941 amd_iommu_suspend();
1942}
1943
1944int amd_iommu_reenable(int mode)
1945{
1946 amd_iommu_resume();
1947
1948 return 0;
1949}
1752 1950
1951int __init amd_iommu_enable_faulting(void)
1952{
1953 /* We enable MSI later when PCI is initialized */
1954 return 0;
1955}
1956#endif
1753 1957
1754/* 1958/*
1755 * This is the core init function for AMD IOMMU hardware in the system. 1959 * This is the core init function for AMD IOMMU hardware in the system.
@@ -1762,8 +1966,17 @@ static int __init amd_iommu_init(void)
1762 1966
1763 ret = iommu_go_to_state(IOMMU_INITIALIZED); 1967 ret = iommu_go_to_state(IOMMU_INITIALIZED);
1764 if (ret) { 1968 if (ret) {
1765 disable_iommus(); 1969 free_dma_resources();
1766 free_on_init_error(); 1970 if (!irq_remapping_enabled) {
1971 disable_iommus();
1972 free_on_init_error();
1973 } else {
1974 struct amd_iommu *iommu;
1975
1976 uninit_device_table_dma();
1977 for_each_iommu(iommu)
1978 iommu_flush_all_caches(iommu);
1979 }
1767 } 1980 }
1768 1981
1769 return ret; 1982 return ret;
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 1a7f41c6cc6..c294961bdd3 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -32,6 +32,14 @@ extern void amd_iommu_uninit_devices(void);
32extern void amd_iommu_init_notifier(void); 32extern void amd_iommu_init_notifier(void);
33extern void amd_iommu_init_api(void); 33extern void amd_iommu_init_api(void);
34 34
35/* Needed for interrupt remapping */
36extern int amd_iommu_supported(void);
37extern int amd_iommu_prepare(void);
38extern int amd_iommu_enable(void);
39extern void amd_iommu_disable(void);
40extern int amd_iommu_reenable(int);
41extern int amd_iommu_enable_faulting(void);
42
35/* IOMMUv2 specific functions */ 43/* IOMMUv2 specific functions */
36struct iommu_domain; 44struct iommu_domain;
37 45
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index d0dab865a8b..c9aa3d079ff 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -152,6 +152,7 @@
152#define CMD_INV_DEV_ENTRY 0x02 152#define CMD_INV_DEV_ENTRY 0x02
153#define CMD_INV_IOMMU_PAGES 0x03 153#define CMD_INV_IOMMU_PAGES 0x03
154#define CMD_INV_IOTLB_PAGES 0x04 154#define CMD_INV_IOTLB_PAGES 0x04
155#define CMD_INV_IRT 0x05
155#define CMD_COMPLETE_PPR 0x07 156#define CMD_COMPLETE_PPR 0x07
156#define CMD_INV_ALL 0x08 157#define CMD_INV_ALL 0x08
157 158
@@ -175,6 +176,7 @@
175#define DEV_ENTRY_EX 0x67 176#define DEV_ENTRY_EX 0x67
176#define DEV_ENTRY_SYSMGT1 0x68 177#define DEV_ENTRY_SYSMGT1 0x68
177#define DEV_ENTRY_SYSMGT2 0x69 178#define DEV_ENTRY_SYSMGT2 0x69
179#define DEV_ENTRY_IRQ_TBL_EN 0x80
178#define DEV_ENTRY_INIT_PASS 0xb8 180#define DEV_ENTRY_INIT_PASS 0xb8
179#define DEV_ENTRY_EINT_PASS 0xb9 181#define DEV_ENTRY_EINT_PASS 0xb9
180#define DEV_ENTRY_NMI_PASS 0xba 182#define DEV_ENTRY_NMI_PASS 0xba
@@ -183,6 +185,8 @@
183#define DEV_ENTRY_MODE_MASK 0x07 185#define DEV_ENTRY_MODE_MASK 0x07
184#define DEV_ENTRY_MODE_SHIFT 0x09 186#define DEV_ENTRY_MODE_SHIFT 0x09
185 187
188#define MAX_DEV_TABLE_ENTRIES 0xffff
189
186/* constants to configure the command buffer */ 190/* constants to configure the command buffer */
187#define CMD_BUFFER_SIZE 8192 191#define CMD_BUFFER_SIZE 8192
188#define CMD_BUFFER_UNINITIALIZED 1 192#define CMD_BUFFER_UNINITIALIZED 1
@@ -255,7 +259,7 @@
255#define PAGE_SIZE_ALIGN(address, pagesize) \ 259#define PAGE_SIZE_ALIGN(address, pagesize) \
256 ((address) & ~((pagesize) - 1)) 260 ((address) & ~((pagesize) - 1))
257/* 261/*
258 * Creates an IOMMU PTE for an address an a given pagesize 262 * Creates an IOMMU PTE for an address and a given pagesize
259 * The PTE has no permission bits set 263 * The PTE has no permission bits set
260 * Pagesize is expected to be a power-of-two larger than 4096 264 * Pagesize is expected to be a power-of-two larger than 4096
261 */ 265 */
@@ -334,6 +338,23 @@ extern bool amd_iommu_np_cache;
334/* Only true if all IOMMUs support device IOTLBs */ 338/* Only true if all IOMMUs support device IOTLBs */
335extern bool amd_iommu_iotlb_sup; 339extern bool amd_iommu_iotlb_sup;
336 340
341#define MAX_IRQS_PER_TABLE 256
342#define IRQ_TABLE_ALIGNMENT 128
343
344struct irq_remap_table {
345 spinlock_t lock;
346 unsigned min_index;
347 u32 *table;
348};
349
350extern struct irq_remap_table **irq_lookup_table;
351
352/* Interrupt remapping feature used? */
353extern bool amd_iommu_irq_remap;
354
355/* kmem_cache to get tables with 128 byte alignement */
356extern struct kmem_cache *amd_iommu_irq_cache;
357
337/* 358/*
338 * Make iterating over all IOMMUs easier 359 * Make iterating over all IOMMUs easier
339 */ 360 */
@@ -404,7 +425,7 @@ struct iommu_dev_data {
404 struct list_head dev_data_list; /* For global dev_data_list */ 425 struct list_head dev_data_list; /* For global dev_data_list */
405 struct iommu_dev_data *alias_data;/* The alias dev_data */ 426 struct iommu_dev_data *alias_data;/* The alias dev_data */
406 struct protection_domain *domain; /* Domain the device is bound to */ 427 struct protection_domain *domain; /* Domain the device is bound to */
407 atomic_t bind; /* Domain attach reverent count */ 428 atomic_t bind; /* Domain attach reference count */
408 u16 devid; /* PCI Device ID */ 429 u16 devid; /* PCI Device ID */
409 bool iommu_v2; /* Device can make use of IOMMUv2 */ 430 bool iommu_v2; /* Device can make use of IOMMUv2 */
410 bool passthrough; /* Default for device is pt_domain */ 431 bool passthrough; /* Default for device is pt_domain */
@@ -565,6 +586,16 @@ struct amd_iommu {
565 u32 stored_l2[0x83]; 586 u32 stored_l2[0x83];
566}; 587};
567 588
589struct devid_map {
590 struct list_head list;
591 u8 id;
592 u16 devid;
593};
594
595/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
596extern struct list_head ioapic_map;
597extern struct list_head hpet_map;
598
568/* 599/*
569 * List with all IOMMUs in the system. This list is not locked because it is 600 * List with all IOMMUs in the system. This list is not locked because it is
570 * only written and read at driver initialization or suspend time 601 * only written and read at driver initialization or suspend time
@@ -678,6 +709,30 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
678 return (((u16)bus) << 8) | devfn; 709 return (((u16)bus) << 8) | devfn;
679} 710}
680 711
712static inline int get_ioapic_devid(int id)
713{
714 struct devid_map *entry;
715
716 list_for_each_entry(entry, &ioapic_map, list) {
717 if (entry->id == id)
718 return entry->devid;
719 }
720
721 return -EINVAL;
722}
723
724static inline int get_hpet_devid(int id)
725{
726 struct devid_map *entry;
727
728 list_for_each_entry(entry, &hpet_map, list) {
729 if (entry->id == id)
730 return entry->devid;
731 }
732
733 return -EINVAL;
734}
735
681#ifdef CONFIG_AMD_IOMMU_STATS 736#ifdef CONFIG_AMD_IOMMU_STATS
682 737
683struct __iommu_counter { 738struct __iommu_counter {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 80bad32aa46..7fe44f83cc3 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -840,8 +840,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
840 if (__exynos_sysmmu_disable(data)) { 840 if (__exynos_sysmmu_disable(data)) {
841 dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n", 841 dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
842 __func__, __pa(priv->pgtable)); 842 __func__, __pa(priv->pgtable));
843 list_del(&data->node); 843 list_del_init(&data->node);
844 INIT_LIST_HEAD(&data->node);
845 844
846 } else { 845 } else {
847 dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed", 846 dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index db820d7dd0b..d4a4cd445ca 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -589,7 +589,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
589{ 589{
590 int i; 590 int i;
591 591
592 domain->iommu_coherency = 1; 592 i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
593
594 domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
593 595
594 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { 596 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
595 if (!ecap_coherent(g_iommus[i]->ecap)) { 597 if (!ecap_coherent(g_iommus[i]->ecap)) {
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 151690db692..faf85d6e33f 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -51,6 +51,11 @@ early_param("intremap", setup_irqremap);
51void __init setup_irq_remapping_ops(void) 51void __init setup_irq_remapping_ops(void)
52{ 52{
53 remap_ops = &intel_irq_remap_ops; 53 remap_ops = &intel_irq_remap_ops;
54
55#ifdef CONFIG_AMD_IOMMU
56 if (amd_iommu_irq_ops.prepare() == 0)
57 remap_ops = &amd_iommu_irq_ops;
58#endif
54} 59}
55 60
56int irq_remapping_supported(void) 61int irq_remapping_supported(void)
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index b12974cc1df..95363acb583 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -82,6 +82,12 @@ struct irq_remap_ops {
82}; 82};
83 83
84extern struct irq_remap_ops intel_irq_remap_ops; 84extern struct irq_remap_ops intel_irq_remap_ops;
85extern struct irq_remap_ops amd_iommu_irq_ops;
86
87#else /* CONFIG_IRQ_REMAP */
88
89#define irq_remapping_enabled 0
90#define disable_irq_remap 1
85 91
86#endif /* CONFIG_IRQ_REMAP */ 92#endif /* CONFIG_IRQ_REMAP */
87 93
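
[Editor's note] The irq_remapping.c and irq_remapping.h changes above wire the AMD driver into the generic remapping layer through an ops table: setup_irq_remapping_ops() keeps the Intel implementation as the default and switches to amd_iommu_irq_ops only when its prepare() callback reports success. A reduced user-space sketch of that selection pattern follows; the names here are invented for illustration and carry only one of the many callbacks the real struct irq_remap_ops has.

#include <stdio.h>

struct remap_ops {
	const char *name;
	int (*prepare)(void);
};

static int intel_prepare(void) { return 0; }
static int amd_prepare(void)   { return 0; }	/* 0 = AMD IOMMU with usable IVRS found */

static struct remap_ops intel_ops = { "intel", intel_prepare };
static struct remap_ops amd_ops   = { "amd",   amd_prepare };

static struct remap_ops *remap_ops = &intel_ops;	/* default, as in the patch */

int main(void)
{
	/* Same precedence as setup_irq_remapping_ops(): try AMD, else keep Intel. */
	if (amd_ops.prepare() == 0)
		remap_ops = &amd_ops;

	printf("using %s irq remapping ops\n", remap_ops->name);
	return 0;
}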
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2a4bb36bc68..0b4d62e0c64 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -32,14 +32,55 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/of_iommu.h> 34#include <linux/of_iommu.h>
35#include <linux/debugfs.h>
36#include <linux/seq_file.h>
35 37
36#include <asm/page.h> 38#include <asm/page.h>
37#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
38 40
39#include <mach/iomap.h> 41#include <mach/iomap.h>
40#include <mach/smmu.h>
41#include <mach/tegra-ahb.h> 42#include <mach/tegra-ahb.h>
42 43
44enum smmu_hwgrp {
45 HWGRP_AFI,
46 HWGRP_AVPC,
47 HWGRP_DC,
48 HWGRP_DCB,
49 HWGRP_EPP,
50 HWGRP_G2,
51 HWGRP_HC,
52 HWGRP_HDA,
53 HWGRP_ISP,
54 HWGRP_MPE,
55 HWGRP_NV,
56 HWGRP_NV2,
57 HWGRP_PPCS,
58 HWGRP_SATA,
59 HWGRP_VDE,
60 HWGRP_VI,
61
62 HWGRP_COUNT,
63
64 HWGRP_END = ~0,
65};
66
67#define HWG_AFI (1 << HWGRP_AFI)
68#define HWG_AVPC (1 << HWGRP_AVPC)
69#define HWG_DC (1 << HWGRP_DC)
70#define HWG_DCB (1 << HWGRP_DCB)
71#define HWG_EPP (1 << HWGRP_EPP)
72#define HWG_G2 (1 << HWGRP_G2)
73#define HWG_HC (1 << HWGRP_HC)
74#define HWG_HDA (1 << HWGRP_HDA)
75#define HWG_ISP (1 << HWGRP_ISP)
76#define HWG_MPE (1 << HWGRP_MPE)
77#define HWG_NV (1 << HWGRP_NV)
78#define HWG_NV2 (1 << HWGRP_NV2)
79#define HWG_PPCS (1 << HWGRP_PPCS)
80#define HWG_SATA (1 << HWGRP_SATA)
81#define HWG_VDE (1 << HWGRP_VDE)
82#define HWG_VI (1 << HWGRP_VI)
83
43/* bitmap of the page sizes currently supported */ 84/* bitmap of the page sizes currently supported */
44#define SMMU_IOMMU_PGSIZES (SZ_4K) 85#define SMMU_IOMMU_PGSIZES (SZ_4K)
45 86
@@ -47,16 +88,29 @@
47#define SMMU_CONFIG_DISABLE 0 88#define SMMU_CONFIG_DISABLE 0
48#define SMMU_CONFIG_ENABLE 1 89#define SMMU_CONFIG_ENABLE 1
49 90
50#define SMMU_TLB_CONFIG 0x14 91/* REVISIT: To support multiple MCs */
51#define SMMU_TLB_CONFIG_STATS__MASK (1 << 31) 92enum {
52#define SMMU_TLB_CONFIG_STATS__ENABLE (1 << 31) 93 _MC = 0,
94};
95
96enum {
97 _TLB = 0,
98 _PTC,
99};
100
101#define SMMU_CACHE_CONFIG_BASE 0x14
102#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache)
103#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache)
104
105#define SMMU_CACHE_CONFIG_STATS_SHIFT 31
106#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
107#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30
108#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
109
53#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29) 110#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
54#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10 111#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10
55#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010 112#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010
56 113
57#define SMMU_PTC_CONFIG 0x18
58#define SMMU_PTC_CONFIG_STATS__MASK (1 << 31)
59#define SMMU_PTC_CONFIG_STATS__ENABLE (1 << 31)
60#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29) 114#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29)
61#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f 115#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f
62#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f 116#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f
@@ -86,10 +140,10 @@
86 140
87#define SMMU_ASID_SECURITY 0x38 141#define SMMU_ASID_SECURITY 0x38
88 142
89#define SMMU_STATS_TLB_HIT_COUNT 0x1f0 143#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0
90#define SMMU_STATS_TLB_MISS_COUNT 0x1f4 144
91#define SMMU_STATS_PTC_HIT_COUNT 0x1f8 145#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
92#define SMMU_STATS_PTC_MISS_COUNT 0x1fc 146 (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
93 147
94#define SMMU_TRANSLATION_ENABLE_0 0x228 148#define SMMU_TRANSLATION_ENABLE_0 0x228
95#define SMMU_TRANSLATION_ENABLE_1 0x22c 149#define SMMU_TRANSLATION_ENABLE_1 0x22c
@@ -231,6 +285,12 @@ struct smmu_as {
231 spinlock_t client_lock; /* for client list */ 285 spinlock_t client_lock; /* for client list */
232}; 286};
233 287
288struct smmu_debugfs_info {
289 struct smmu_device *smmu;
290 int mc;
291 int cache;
292};
293
234/* 294/*
235 * Per SMMU device - IOMMU device 295 * Per SMMU device - IOMMU device
236 */ 296 */
@@ -251,6 +311,9 @@ struct smmu_device {
251 unsigned long translation_enable_2; 311 unsigned long translation_enable_2;
252 unsigned long asid_security; 312 unsigned long asid_security;
253 313
314 struct dentry *debugfs_root;
315 struct smmu_debugfs_info *debugfs_info;
316
254 struct device_node *ahb; 317 struct device_node *ahb;
255 318
256 int num_as; 319 int num_as;
@@ -412,8 +475,8 @@ static int smmu_setup_regs(struct smmu_device *smmu)
412 smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1); 475 smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
413 smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2); 476 smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
414 smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY); 477 smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
415 smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG); 478 smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
416 smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG); 479 smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
417 480
418 smmu_flush_regs(smmu, 1); 481 smmu_flush_regs(smmu, 1);
419 482
@@ -895,6 +958,175 @@ static struct iommu_ops smmu_iommu_ops = {
895 .pgsize_bitmap = SMMU_IOMMU_PGSIZES, 958 .pgsize_bitmap = SMMU_IOMMU_PGSIZES,
896}; 959};
897 960
961/* Should be in the order of enum */
962static const char * const smmu_debugfs_mc[] = { "mc", };
963static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", };
964
965static ssize_t smmu_debugfs_stats_write(struct file *file,
966 const char __user *buffer,
967 size_t count, loff_t *pos)
968{
969 struct smmu_debugfs_info *info;
970 struct smmu_device *smmu;
971 struct dentry *dent;
972 int i;
973 enum {
974 _OFF = 0,
975 _ON,
976 _RESET,
977 };
978 const char * const command[] = {
979 [_OFF] = "off",
980 [_ON] = "on",
981 [_RESET] = "reset",
982 };
983 char str[] = "reset";
984 u32 val;
985 size_t offs;
986
987 count = min_t(size_t, count, sizeof(str));
988 if (copy_from_user(str, buffer, count))
989 return -EINVAL;
990
991 for (i = 0; i < ARRAY_SIZE(command); i++)
992 if (strncmp(str, command[i],
993 strlen(command[i])) == 0)
994 break;
995
996 if (i == ARRAY_SIZE(command))
997 return -EINVAL;
998
999 dent = file->f_dentry;
1000 info = dent->d_inode->i_private;
1001 smmu = info->smmu;
1002
1003 offs = SMMU_CACHE_CONFIG(info->cache);
1004 val = smmu_read(smmu, offs);
1005 switch (i) {
1006 case _OFF:
1007 val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
1008 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1009 smmu_write(smmu, val, offs);
1010 break;
1011 case _ON:
1012 val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
1013 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1014 smmu_write(smmu, val, offs);
1015 break;
1016 case _RESET:
1017 val |= SMMU_CACHE_CONFIG_STATS_TEST;
1018 smmu_write(smmu, val, offs);
1019 val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
1020 smmu_write(smmu, val, offs);
1021 break;
1022 default:
1023 BUG();
1024 break;
1025 }
1026
1027 dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
1028 val, smmu_read(smmu, offs), offs);
1029
1030 return count;
1031}
1032
1033static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
1034{
1035 struct smmu_debugfs_info *info;
1036 struct smmu_device *smmu;
1037 struct dentry *dent;
1038 int i;
1039 const char * const stats[] = { "hit", "miss", };
1040
1041 dent = d_find_alias(s->private);
1042 info = dent->d_inode->i_private;
1043 smmu = info->smmu;
1044
1045 for (i = 0; i < ARRAY_SIZE(stats); i++) {
1046 u32 val;
1047 size_t offs;
1048
1049 offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
1050 val = smmu_read(smmu, offs);
1051 seq_printf(s, "%s:%08x ", stats[i], val);
1052
1053 dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
1054 stats[i], val, offs);
1055 }
1056 seq_printf(s, "\n");
1057
1058 return 0;
1059}
1060
1061static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
1062{
1063 return single_open(file, smmu_debugfs_stats_show, inode);
1064}
1065
1066static const struct file_operations smmu_debugfs_stats_fops = {
1067 .open = smmu_debugfs_stats_open,
1068 .read = seq_read,
1069 .llseek = seq_lseek,
1070 .release = single_release,
1071 .write = smmu_debugfs_stats_write,
1072};
1073
1074static void smmu_debugfs_delete(struct smmu_device *smmu)
1075{
1076 debugfs_remove_recursive(smmu->debugfs_root);
1077 kfree(smmu->debugfs_info);
1078}
1079
1080static void smmu_debugfs_create(struct smmu_device *smmu)
1081{
1082 int i;
1083 size_t bytes;
1084 struct dentry *root;
1085
1086 bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
1087 sizeof(*smmu->debugfs_info);
1088 smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
1089 if (!smmu->debugfs_info)
1090 return;
1091
1092 root = debugfs_create_dir(dev_name(smmu->dev), NULL);
1093 if (!root)
1094 goto err_out;
1095 smmu->debugfs_root = root;
1096
1097 for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
1098 int j;
1099 struct dentry *mc;
1100
1101 mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
1102 if (!mc)
1103 goto err_out;
1104
1105 for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
1106 struct dentry *cache;
1107 struct smmu_debugfs_info *info;
1108
1109 info = smmu->debugfs_info;
1110 info += i * ARRAY_SIZE(smmu_debugfs_mc) + j;
1111 info->smmu = smmu;
1112 info->mc = i;
1113 info->cache = j;
1114
1115 cache = debugfs_create_file(smmu_debugfs_cache[j],
1116 S_IWUGO | S_IRUGO, mc,
1117 (void *)info,
1118 &smmu_debugfs_stats_fops);
1119 if (!cache)
1120 goto err_out;
1121 }
1122 }
1123
1124 return;
1125
1126err_out:
1127 smmu_debugfs_delete(smmu);
1128}
1129
898static int tegra_smmu_suspend(struct device *dev) 1130static int tegra_smmu_suspend(struct device *dev)
899{ 1131{
900 struct smmu_device *smmu = dev_get_drvdata(dev); 1132 struct smmu_device *smmu = dev_get_drvdata(dev);
@@ -999,6 +1231,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
999 if (!smmu->avp_vector_page) 1231 if (!smmu->avp_vector_page)
1000 return -ENOMEM; 1232 return -ENOMEM;
1001 1233
1234 smmu_debugfs_create(smmu);
1002 smmu_handle = smmu; 1235 smmu_handle = smmu;
1003 return 0; 1236 return 0;
1004} 1237}
@@ -1008,6 +1241,8 @@ static int tegra_smmu_remove(struct platform_device *pdev)
1008 struct smmu_device *smmu = platform_get_drvdata(pdev); 1241 struct smmu_device *smmu = platform_get_drvdata(pdev);
1009 int i; 1242 int i;
1010 1243
1244 smmu_debugfs_delete(smmu);
1245
1011 smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG); 1246 smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
1012 for (i = 0; i < smmu->num_as; i++) 1247 for (i = 0; i < smmu->num_as; i++)
1013 free_pdir(&smmu->as[i]); 1248 free_pdir(&smmu->as[i]);
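
[Editor's note] The tegra-smmu.c rework above folds the fixed TLB/PTC register constants into macros parameterised by memory controller, cache and hit/miss index, and the new debugfs code reads the statistics counters through them. A quick stand-alone check (plain user-space C, not kernel code) that the new macros resolve to the same offsets as the constants they replace:

#include <stdio.h>

/* Values copied from the macros added in the tegra-smmu.c diff above. */
enum { _MC = 0 };
enum { _TLB = 0, _PTC };

#define SMMU_CACHE_CONFIG_BASE		0x14
#define __SMMU_CACHE_CONFIG(mc, cache)	(SMMU_CACHE_CONFIG_BASE + 4 * (cache))
#define SMMU_CACHE_CONFIG(cache)	__SMMU_CACHE_CONFIG(_MC, cache)

#define SMMU_STATS_CACHE_COUNT_BASE	0x1f0
#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \
	(SMMU_STATS_CACHE_COUNT_BASE + 8 * (cache) + 4 * (hitmiss))

int main(void)
{
	/* Expected: 0x14 and 0x18, the removed SMMU_TLB_CONFIG / SMMU_PTC_CONFIG. */
	printf("TLB config: %#x, PTC config: %#x\n",
	       SMMU_CACHE_CONFIG(_TLB), SMMU_CACHE_CONFIG(_PTC));

	/* Expected: 0x1f0 0x1f4 0x1f8 0x1fc, the removed per-counter constants. */
	printf("TLB hit/miss: %#x %#x, PTC hit/miss: %#x %#x\n",
	       SMMU_STATS_CACHE_COUNT(_MC, _TLB, 0),
	       SMMU_STATS_CACHE_COUNT(_MC, _TLB, 1),
	       SMMU_STATS_CACHE_COUNT(_MC, _PTC, 0),
	       SMMU_STATS_CACHE_COUNT(_MC, _PTC, 1));
	return 0;
}

Both configuration offsets (0x14, 0x18) and all four counter offsets (0x1f0-0x1fc) match the defines removed by the patch, so the register layout is unchanged; only the parameterisation needed by the debugfs statistics code is new.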