Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--  drivers/iommu/amd_iommu.c  145
1 file changed, 64 insertions(+), 81 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 830183737b0f..21d02b0d907c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,6 +46,7 @@
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
+#include "pci.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -263,12 +264,6 @@ static bool check_device(struct device *dev)
 	return true;
 }
 
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
-	pci_dev_put(*from);
-	*from = to;
-}
-
 static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
 	while (!bus->self) {
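Note: this hunk deletes the file-local swap_pci_ref() helper, and the first hunk above adds #include "pci.h". A plausible reading, inferred from the diff alone, is that an equivalent helper is now shared through the PCI core's private header instead of being duplicated in this driver. For reference, the removed helper implemented a put-and-replace pattern so that a loop walking up the bus hierarchy never holds two device references at once:

#include <linux/pci.h>

/* The deleted helper, reproduced for illustration only. */
static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
{
	pci_dev_put(*from);	/* drop the reference currently held */
	*from = to;		/* adopt the new, already-referenced device */
}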
@@ -701,9 +696,6 @@ retry:
 static void iommu_poll_events(struct amd_iommu *iommu)
 {
 	u32 head, tail;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	}
 
 	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
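Note: the two hunks above drop iommu->lock from the event-log polling path, and the following hunks do the same for the PPR log. A consistent reading is that after the interrupt-handler rewrite further down, each log is drained only from the single threaded handler of that IOMMU, so the handler itself serializes all accesses and the spinlock adds nothing. A minimal sketch of the resulting single-consumer drain pattern, using made-up names rather than the driver's API:

#include <stdint.h>

#define RING_SIZE	4096	/* assumed ring-buffer size */
#define ENTRY_SIZE	16	/* assumed entry size */

/* Safe without a lock as long as exactly one thread ever drains the ring. */
static void drain_ring(volatile uint32_t *head_reg,
		       volatile uint32_t *tail_reg,
		       void (*handle)(uint32_t offset))
{
	uint32_t head = *head_reg;
	uint32_t tail = *tail_reg;

	while (head != tail) {
		handle(head);				/* consume one entry */
		head = (head + ENTRY_SIZE) % RING_SIZE;	/* advance consumer */
	}
	*head_reg = head;				/* publish progress */
}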
@@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 
 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 {
-	unsigned long flags;
 	u32 head, tail;
 
 	if (iommu->ppr_log == NULL)
 		return;
 
-	/* enable ppr interrupts again */
-	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
@@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 
-		/*
-		 * Release iommu->lock because ppr-handling might need to
-		 * re-acquire it
-		 */
-		spin_unlock_irqrestore(&iommu->lock, flags);
-
 		/* Handle PPR entry */
 		iommu_handle_ppr_entry(iommu, entry);
 
-		spin_lock_irqsave(&iommu->lock, flags);
-
 		/* Refresh ring-buffer information */
 		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 	}
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
-	struct amd_iommu *iommu;
+	struct amd_iommu *iommu = (struct amd_iommu *) data;
+	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	for_each_iommu(iommu) {
-		iommu_poll_events(iommu);
-		iommu_poll_ppr_log(iommu);
-	}
+	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+		/* Enable EVT and PPR interrupts again */
+		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+		       iommu->mmio_base + MMIO_STATUS_OFFSET);
 
+		if (status & MMIO_STATUS_EVT_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+			iommu_poll_events(iommu);
+		}
+
+		if (status & MMIO_STATUS_PPR_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+			iommu_poll_ppr_log(iommu);
+		}
+
+		/*
+		 * Hardware bug: ERBT1312
+		 * When re-enabling interrupt (by writing 1
+		 * to clear the bit), the hardware might also try to set
+		 * the interrupt bit in the event status register.
+		 * In this scenario, the bit will be set, and disable
+		 * subsequent interrupts.
+		 *
+		 * Workaround: The IOMMU driver should read back the
+		 * status register and check if the interrupt bits are cleared.
+		 * If not, driver will need to go through the interrupt handler
+		 * again and re-clear the bits
+		 */
+		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+	}
 	return IRQ_HANDLED;
 }
 
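Note: the rewritten handler reads the status register, acknowledges the EVT/PPR bits (they are write-1-to-clear), services both logs, and then re-reads the status. Per the ERBT1312 erratum quoted in the added comment, the hardware may set an interrupt bit again while it is being cleared, so the loop repeats until the bits read back as zero. A standalone sketch of this write-1-to-clear re-check pattern, with hypothetical register helpers that are not the driver's API:

#include <stdint.h>

#define IRQ_PENDING_MASK	0x3u	/* assume two w1c interrupt bits */

static void ack_until_clear(volatile uint32_t *status_reg,
			    void (*service)(uint32_t status))
{
	uint32_t status = *status_reg;

	while (status & IRQ_PENDING_MASK) {
		/* w1c: writing 1 acknowledges (clears) the pending bits */
		*status_reg = status & IRQ_PENDING_MASK;
		service(status);	/* handle the cause of the interrupt */
		/* re-read: the erratum may have re-set a bit during the clear */
		status = *status_reg;
	}
}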
@@ -2839,24 +2839,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 /*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
-			   int nelems, int dir)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address = (dma_addr_t)sg_phys(s);
-		s->dma_length = s->length;
-	}
-
-	return nelems;
-}
-
-/*
  * The exported map_sg function for dma_ops (handles scatter-gather
  * lists).
  */
@@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	INC_STATS_COUNTER(cnt_map_sg);
 
 	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL)
-		return map_sg_no_iommu(dev, sglist, nelems, dir);
-	else if (IS_ERR(domain))
+	if (IS_ERR(domain))
 		return 0;
 
 	dma_mask = *dev->dma_mask;
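Note: with map_sg_no_iommu() gone, the special-case test PTR_ERR(domain) == -EINVAL, which used to select the no-IOMMU path, collapses into a single IS_ERR() check; presumably devices without an IOMMU no longer reach these dma_ops at all, though the diff itself does not show where that is decided. For readers unfamiliar with the convention, IS_ERR()/PTR_ERR() encode a negative errno inside an invalid pointer value. A self-contained re-implementation for illustration; the kernel's versions live in <linux/err.h>:

#define MAX_ERRNO	4095

/* Encode a negative errno as a pointer in the top-most page. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the errno from an error pointer. */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* True when the pointer value falls in the reserved errno range. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}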
@@ -3410,7 +3390,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
-					  unsigned long iova)
+					  dma_addr_t iova)
 {
 	struct protection_domain *domain = dom->priv;
 	unsigned long offset_mask;
@@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	if (!table)
 		goto out;
 
+	/* Initialize table spin-lock */
+	spin_lock_init(&table->lock);
+
 	if (ioapic)
 		/* Keep the first 32 indexes free for IOAPIC interrupts */
 		table->min_index = 32;
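Note: this hunk initializes table->lock immediately after the table is allocated and before it becomes reachable by other code paths; without the spin_lock_init() the lock would be used uninitialized, so this reads like a bugfix. A minimal sketch of the init-before-publish rule, with hypothetical type names:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct remap_table {
	spinlock_t lock;
	u32 *entries;
};

static struct remap_table *table_alloc(void)
{
	struct remap_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return NULL;

	/* Must happen before any pointer to t is made visible to other CPUs. */
	spin_lock_init(&t->lock);
	return t;
}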
@@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 			c = 0;
 
 		if (c == count) {
-			struct irq_2_iommu *irte_info;
+			struct irq_2_irte *irte_info;
 
 			for (; c != 0; --c)
 				table->table[index - c + 1] = IRTE_ALLOCATED;
@@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 			index -= count - 1;
 
 			cfg->remapped = 1;
-			irte_info = &cfg->irq_2_iommu;
-			irte_info->sub_handle = devid;
-			irte_info->irte_index = index;
+			irte_info = &cfg->irq_2_irte;
+			irte_info->devid = devid;
+			irte_info->index = index;
 
 			goto out;
 		}
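Note: from here to the end of the diff the change is a mechanical rename of the per-IRQ remapping bookkeeping from struct irq_2_iommu (fields sub_handle and irte_index) to struct irq_2_irte (fields devid and index), names that match what is actually stored: a requestor/device ID and a slot in that device's interrupt remapping table. A reconstruction of the renamed struct as implied by its uses in this diff; the real definition lives in a header not shown here, so the field types are an assumption:

#include <linux/types.h>

struct irq_2_irte {
	u16 devid;	/* requestor ID that selects the IRTE table */
	u16 index;	/* slot within that device's IRTE table */
};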
@@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 			      struct io_apic_irq_attr *attr)
 {
 	struct irq_remap_table *table;
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	union irte irte;
 	int ioapic_id;
@@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 	if (!cfg)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 	ioapic_id = mpc_ioapic_id(attr->ioapic);
 	devid = get_ioapic_devid(ioapic_id);
 
@@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 
 	/* Setup IRQ remapping info */
 	cfg->remapped = 1;
-	irte_info->sub_handle = devid;
-	irte_info->irte_index = index;
+	irte_info->devid = devid;
+	irte_info->index = index;
 
 	/* Setup IRTE for IOMMU */
 	irte.val = 0;
@@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 			bool force)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	unsigned int dest, irq;
 	struct irq_cfg *cfg;
 	union irte irte;
@@ -4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 	cfg = data->chip_data;
 	irq = data->irq;
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
 		return -EINVAL;
 
-	if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+	if (get_irte(irte_info->devid, irte_info->index, &irte))
 		return -EBUSY;
 
 	if (assign_irq_vector(irq, cfg, mask))
@@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 	irte.fields.vector = cfg->vector;
 	irte.fields.destination = dest;
 
-	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+	modify_irte(irte_info->devid, irte_info->index, irte);
 
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
@@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 static int free_irq(int irq)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 
 	cfg = irq_get_chip_data(irq);
 	if (!cfg)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
-	free_irte(irte_info->sub_handle, irte_info->irte_index);
+	free_irte(irte_info->devid, irte_info->index);
 
 	return 0;
 }
@@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
 			    unsigned int irq, unsigned int dest,
 			    struct msi_msg *msg, u8 hpet_id)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	union irte irte;
 
@@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
 	if (!cfg)
 		return;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
 	irte.val = 0;
 	irte.fields.vector = cfg->vector;
@@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev,
 	irte.fields.dm = apic->irq_dest_mode;
 	irte.fields.valid = 1;
 
-	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+	modify_irte(irte_info->devid, irte_info->index, irte);
 
 	msg->address_hi = MSI_ADDR_BASE_HI;
 	msg->address_lo = MSI_ADDR_BASE_LO;
-	msg->data = irte_info->irte_index;
+	msg->data = irte_info->index;
 }
 
 static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
@@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
 static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 			 int index, int offset)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	u16 devid;
 
@@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 		return 0;
 
 	devid = get_device_id(&pdev->dev);
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
 	cfg->remapped = 1;
-	irte_info->sub_handle = devid;
-	irte_info->irte_index = index + offset;
+	irte_info->devid = devid;
+	irte_info->index = index + offset;
 
 	return 0;
 }
 
 static int setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	int index, devid;
 
@@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
 	if (!cfg)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 	devid = get_hpet_devid(id);
 	if (devid < 0)
 		return devid;
@@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
 		return index;
 
 	cfg->remapped = 1;
-	irte_info->sub_handle = devid;
-	irte_info->irte_index = index;
+	irte_info->devid = devid;
+	irte_info->index = index;
 
 	return 0;
 }