author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-06 17:59:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-06 17:59:13 -0400
commit     99737982ca39065a58021bdc31486ea783f952d3
tree       e402502e0f4700d92c4c0b48d0919ea2687f53dc /drivers/iommu
parent     91f8575685e35f3bd021286bc82d26397458f5a9
parent     0c4513be3d01a854867446ee793748409cc0ebdf
Merge tag 'iommu-updates-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"The updates are mostly about the x86 IOMMUs this time.
Exceptions are the groundwork for the PAMU IOMMU from Freescale (for a
PPC platform) and an extension to the IOMMU group interface.
On the x86 side this includes a workaround for VT-d to disable
interrupt remapping on broken chipsets. On the AMD-Vi side the most
important new feature is a kernel command-line interface to override
broken information in IVRS ACPI tables and get interrupt remapping
working this way.
Besides that there are small fixes all over the place."
* tag 'iommu-updates-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (24 commits)
iommu/tegra: Fix printk formats for dma_addr_t
iommu: Add a function to find an iommu group by id
iommu/vt-d: Remove warning for HPET scope type
iommu: Move swap_pci_ref function to drivers/iommu/pci.h.
iommu/vt-d: Disable translation if already enabled
iommu/amd: fix error return code in early_amd_iommu_init()
iommu/AMD: Per-thread IOMMU Interrupt Handling
iommu: Include linux/err.h
iommu/amd: Workaround for ERBT1312
iommu/amd: Document ivrs_ioapic and ivrs_hpet parameters
iommu/amd: Don't report firmware bugs with cmd-line ivrs overrides
iommu/amd: Add ioapic and hpet ivrs override
iommu/amd: Add early maps for ioapic and hpet
iommu/amd: Extend IVRS special device data structure
iommu/amd: Move add_special_device() to __init
iommu: Fix compile warnings with forward declarations
iommu/amd: Properly initialize irq-table lock
iommu/amd: Use AMD specific data structure for irq remapping
iommu/amd: Remove map_sg_no_iommu()
iommu/vt-d: add quirk for broken interrupt remapping on 55XX chipsets
...
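Note on the command-line overrides mentioned above: each ivrs_ioapic/ivrs_hpet entry maps an IOAPIC or HPET id to a PCI bus:dev.fn address and is parsed with the "[%d]=%x:%x.%x" format (see parse_ivrs_ioapic()/parse_ivrs_hpet() in the amd_iommu_init.c hunk below). A hypothetical boot command line using them - the ids 9 and 0 and the address 00:14.0 are purely illustrative - would look like:

    ivrs_ioapic[9]=00:14.0 ivrs_hpet[0]=00:14.0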
Diffstat (limited to 'drivers/iommu')
 drivers/iommu/amd_iommu.c           | 145
 drivers/iommu/amd_iommu_init.c      | 154
 drivers/iommu/amd_iommu_types.h     |   2
 drivers/iommu/dmar.c                |  23
 drivers/iommu/exynos-iommu.c        |   2
 drivers/iommu/intel-iommu.c         |  24
 drivers/iommu/intel_irq_remapping.c |  10
 drivers/iommu/iommu.c               |  37
 drivers/iommu/irq_remapping.c       |   6
 drivers/iommu/irq_remapping.h       |   2
 drivers/iommu/msm_iommu.c           |   2
 drivers/iommu/omap-iommu.c          |   2
 drivers/iommu/pci.h                 |  29
 drivers/iommu/shmobile-iommu.c      |   2
 drivers/iommu/tegra-gart.c          |   5
 drivers/iommu/tegra-smmu.c          |   5
16 files changed, 328 insertions, 122 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 830183737b0f..21d02b0d907c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,6 +46,7 @@
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
+#include "pci.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -263,12 +264,6 @@ static bool check_device(struct device *dev)
 return true;
 }
 
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
-pci_dev_put(*from);
-*from = to;
-}
-
 static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
 while (!bus->self) {
@@ -701,9 +696,6 @@ retry:
 static void iommu_poll_events(struct amd_iommu *iommu)
 {
 u32 head, tail;
-unsigned long flags;
-
-spin_lock_irqsave(&iommu->lock, flags);
 
 head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 }
 
 writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
-spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 
 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 {
-unsigned long flags;
 u32 head, tail;
 
 if (iommu->ppr_log == NULL)
 return;
 
-/* enable ppr interrupts again */
-writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-spin_lock_irqsave(&iommu->lock, flags);
-
 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
@@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 
-/*
- * Release iommu->lock because ppr-handling might need to
- * re-acquire it
- */
-spin_unlock_irqrestore(&iommu->lock, flags);
-
 /* Handle PPR entry */
 iommu_handle_ppr_entry(iommu, entry);
 
-spin_lock_irqsave(&iommu->lock, flags);
-
 /* Refresh ring-buffer information */
 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 }
-
-spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
-struct amd_iommu *iommu;
+struct amd_iommu *iommu = (struct amd_iommu *) data;
+u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-for_each_iommu(iommu) {
-iommu_poll_events(iommu);
-iommu_poll_ppr_log(iommu);
-}
+while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+/* Enable EVT and PPR interrupts again */
+writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+iommu->mmio_base + MMIO_STATUS_OFFSET);
 
+if (status & MMIO_STATUS_EVT_INT_MASK) {
+pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+iommu_poll_events(iommu);
+}
+
+if (status & MMIO_STATUS_PPR_INT_MASK) {
+pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+iommu_poll_ppr_log(iommu);
+}
+
+/*
+ * Hardware bug: ERBT1312
+ * When re-enabling interrupt (by writing 1
+ * to clear the bit), the hardware might also try to set
+ * the interrupt bit in the event status register.
+ * In this scenario, the bit will be set, and disable
+ * subsequent interrupts.
+ *
+ * Workaround: The IOMMU driver should read back the
+ * status register and check if the interrupt bits are cleared.
+ * If not, driver will need to go through the interrupt handler
+ * again and re-clear the bits
+ */
+status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+}
 return IRQ_HANDLED;
 }
 
@@ -2839,24 +2839,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 }
 
 /*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
-int nelems, int dir)
-{
-struct scatterlist *s;
-int i;
-
-for_each_sg(sglist, s, nelems, i) {
-s->dma_address = (dma_addr_t)sg_phys(s);
-s->dma_length = s->length;
-}
-
-return nelems;
-}
-
-/*
  * The exported map_sg function for dma_ops (handles scatter-gather
  * lists).
  */
@@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 INC_STATS_COUNTER(cnt_map_sg);
 
 domain = get_domain(dev);
-if (PTR_ERR(domain) == -EINVAL)
-return map_sg_no_iommu(dev, sglist, nelems, dir);
-else if (IS_ERR(domain))
+if (IS_ERR(domain))
 return 0;
 
 dma_mask = *dev->dma_mask;
@@ -3410,7 +3390,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
-unsigned long iova)
+dma_addr_t iova)
 {
 struct protection_domain *domain = dom->priv;
 unsigned long offset_mask;
@@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 if (!table)
 goto out;
 
+/* Initialize table spin-lock */
+spin_lock_init(&table->lock);
+
 if (ioapic)
 /* Keep the first 32 indexes free for IOAPIC interrupts */
 table->min_index = 32;
@@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 c = 0;
 
 if (c == count) {
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 
 for (; c != 0; --c)
 table->table[index - c + 1] = IRTE_ALLOCATED;
@@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 index -= count - 1;
 
 cfg->remapped = 1;
-irte_info = &cfg->irq_2_iommu;
-irte_info->sub_handle = devid;
-irte_info->irte_index = index;
+irte_info = &cfg->irq_2_irte;
+irte_info->devid = devid;
+irte_info->index = index;
 
 goto out;
 }
@@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 struct io_apic_irq_attr *attr)
 {
 struct irq_remap_table *table;
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 struct irq_cfg *cfg;
 union irte irte;
 int ioapic_id;
@@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 if (!cfg)
 return -EINVAL;
 
-irte_info = &cfg->irq_2_iommu;
+irte_info = &cfg->irq_2_irte;
 ioapic_id = mpc_ioapic_id(attr->ioapic);
 devid = get_ioapic_devid(ioapic_id);
 
@@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 
 /* Setup IRQ remapping info */
 cfg->remapped = 1;
-irte_info->sub_handle = devid;
-irte_info->irte_index = index;
+irte_info->devid = devid;
+irte_info->index = index;
 
 /* Setup IRTE for IOMMU */
 irte.val = 0;
@@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 bool force)
 {
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 unsigned int dest, irq;
 struct irq_cfg *cfg;
 union irte irte;
@@ -4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 cfg = data->chip_data;
 irq = data->irq;
-irte_info = &cfg->irq_2_iommu;
+irte_info = &cfg->irq_2_irte;
 
 if (!cpumask_intersects(mask, cpu_online_mask))
 return -EINVAL;
 
-if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+if (get_irte(irte_info->devid, irte_info->index, &irte))
 return -EBUSY;
 
 if (assign_irq_vector(irq, cfg, mask))
@@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 irte.fields.vector = cfg->vector;
 irte.fields.destination = dest;
 
-modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+modify_irte(irte_info->devid, irte_info->index, irte);
 
 if (cfg->move_in_progress)
 send_cleanup_vector(cfg);
@@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 static int free_irq(int irq)
 {
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 struct irq_cfg *cfg;
 
 cfg = irq_get_chip_data(irq);
 if (!cfg)
 return -EINVAL;
 
-irte_info = &cfg->irq_2_iommu;
+irte_info = &cfg->irq_2_irte;
 
-free_irte(irte_info->sub_handle, irte_info->irte_index);
+free_irte(irte_info->devid, irte_info->index);
 
 return 0;
 }
@@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
 unsigned int irq, unsigned int dest,
 struct msi_msg *msg, u8 hpet_id)
 {
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 struct irq_cfg *cfg;
 union irte irte;
 
@@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
 if (!cfg)
 return;
 
-irte_info = &cfg->irq_2_iommu;
+irte_info = &cfg->irq_2_irte;
 
 irte.val = 0;
 irte.fields.vector = cfg->vector;
@@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev,
 irte.fields.dm = apic->irq_dest_mode;
 irte.fields.valid = 1;
 
-modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+modify_irte(irte_info->devid, irte_info->index, irte);
 
 msg->address_hi = MSI_ADDR_BASE_HI;
 msg->address_lo = MSI_ADDR_BASE_LO;
-msg->data = irte_info->irte_index;
+msg->data = irte_info->index;
 }
 
 static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
@@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
 static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 int index, int offset)
 {
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 struct irq_cfg *cfg;
 u16 devid;
 
@@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 return 0;
 
 devid = get_device_id(&pdev->dev);
-irte_info = &cfg->irq_2_iommu;
+irte_info = &cfg->irq_2_irte;
 
 cfg->remapped = 1;
-irte_info->sub_handle = devid;
-irte_info->irte_index = index + offset;
+irte_info->devid = devid;
+irte_info->index = index + offset;
 
 return 0;
 }
 
 static int setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-struct irq_2_iommu *irte_info;
+struct irq_2_irte *irte_info;
 struct irq_cfg *cfg;
 int index, devid;
 
@@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
 if (!cfg)
 return -EINVAL;
 
-irte_info = &cfg->irq_2_iommu;
+irte_info = &cfg->irq_2_irte;
 devid = get_hpet_devid(id);
 if (devid < 0)
 return devid;
@@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
 return index;
 
 cfg->remapped = 1;
-irte_info->sub_handle = devid;
-irte_info->irte_index = index;
+irte_info->devid = devid;
+irte_info->index = index;
 
 return 0;
 }
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 2f46881256a2..bf51abb78dee 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -213,6 +213,14 @@ enum iommu_init_state {
 IOMMU_INIT_ERROR,
 };
 
+/* Early ioapic and hpet maps from kernel command line */
+#define EARLY_MAP_SIZE 4
+static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
+static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static int __initdata early_ioapic_map_size;
+static int __initdata early_hpet_map_size;
+static bool __initdata cmdline_maps;
+
 static enum iommu_init_state init_state = IOMMU_START_STATE;
 
 static int amd_iommu_enable_interrupts(void);
@@ -703,31 +711,66 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
 set_iommu_for_device(iommu, devid);
 }
 
-static int add_special_device(u8 type, u8 id, u16 devid)
+static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
 {
 struct devid_map *entry;
 struct list_head *list;
 
-if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET)
+if (type == IVHD_SPECIAL_IOAPIC)
+list = &ioapic_map;
+else if (type == IVHD_SPECIAL_HPET)
+list = &hpet_map;
+else
 return -EINVAL;
 
+list_for_each_entry(entry, list, list) {
+if (!(entry->id == id && entry->cmd_line))
+continue;
+
+pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
+type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
+
+return 0;
+}
+
 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 if (!entry)
 return -ENOMEM;
 
 entry->id = id;
 entry->devid = devid;
-
-if (type == IVHD_SPECIAL_IOAPIC)
-list = &ioapic_map;
-else
-list = &hpet_map;
+entry->cmd_line = cmd_line;
 
 list_add_tail(&entry->list, list);
 
 return 0;
 }
 
+static int __init add_early_maps(void)
+{
+int i, ret;
+
+for (i = 0; i < early_ioapic_map_size; ++i) {
+ret = add_special_device(IVHD_SPECIAL_IOAPIC,
+early_ioapic_map[i].id,
+early_ioapic_map[i].devid,
+early_ioapic_map[i].cmd_line);
+if (ret)
+return ret;
+}
+
+for (i = 0; i < early_hpet_map_size; ++i) {
+ret = add_special_device(IVHD_SPECIAL_HPET,
+early_hpet_map[i].id,
+early_hpet_map[i].devid,
+early_hpet_map[i].cmd_line);
+if (ret)
+return ret;
+}
+
+return 0;
+}
+
 /*
  * Reads the device exclusion range from ACPI and initializes the IOMMU with
  * it
@@ -764,6 +807,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 u32 dev_i, ext_flags = 0;
 bool alias = false;
 struct ivhd_entry *e;
+int ret;
+
+
+ret = add_early_maps();
+if (ret)
+return ret;
 
 /*
  * First save the recommended feature enable bits from ACPI
@@ -929,7 +978,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
 PCI_FUNC(devid));
 
 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
-ret = add_special_device(type, handle, devid);
+ret = add_special_device(type, handle, devid, false);
 if (ret)
 return ret;
 break;
@@ -1275,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 amd_iommu_int_handler,
 amd_iommu_int_thread,
 0, "AMD-Vi",
-iommu->dev);
+iommu);
 
 if (r) {
 pci_disable_msi(iommu->dev);
@@ -1638,18 +1687,28 @@ static void __init free_on_init_error(void)
 
 static bool __init check_ioapic_information(void)
 {
+const char *fw_bug = FW_BUG;
 bool ret, has_sb_ioapic;
 int idx;
 
 has_sb_ioapic = false;
 ret = false;
 
+/*
+ * If we have map overrides on the kernel command line the
+ * messages in this function might not describe firmware bugs
+ * anymore - so be careful
+ */
+if (cmdline_maps)
+fw_bug = "";
+
 for (idx = 0; idx < nr_ioapics; idx++) {
 int devid, id = mpc_ioapic_id(idx);
 
 devid = get_ioapic_devid(id);
 if (devid < 0) {
-pr_err(FW_BUG "AMD-Vi: IOAPIC[%d] not in IVRS table\n", id);
+pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
+fw_bug, id);
 ret = false;
 } else if (devid == IOAPIC_SB_DEVID) {
 has_sb_ioapic = true;
@@ -1666,11 +1725,11 @@ static bool __init check_ioapic_information(void)
  * when the BIOS is buggy and provides us the wrong
  * device id for the IOAPIC in the system.
  */
-pr_err(FW_BUG "AMD-Vi: No southbridge IOAPIC found in IVRS table\n");
+pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
 }
 
 if (!ret)
-pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug(s)\n");
+pr_err("AMD-Vi: Disabling interrupt remapping\n");
 
 return ret;
 }
@@ -1801,6 +1860,7 @@ static int __init early_amd_iommu_init(void)
  * Interrupt remapping enabled, create kmem_cache for the
  * remapping tables.
  */
+ret = -ENOMEM;
 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
 MAX_IRQS_PER_TABLE * sizeof(u32),
 IRQ_TABLE_ALIGNMENT,
@@ -2097,8 +2157,70 @@ static int __init parse_amd_iommu_options(char *str)
 return 1;
 }
 
-__setup("amd_iommu_dump", parse_amd_iommu_dump);
-__setup("amd_iommu=", parse_amd_iommu_options);
+static int __init parse_ivrs_ioapic(char *str)
+{
+unsigned int bus, dev, fn;
+int ret, id, i;
+u16 devid;
+
+ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+if (ret != 4) {
+pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
+return 1;
+}
+
+if (early_ioapic_map_size == EARLY_MAP_SIZE) {
+pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
+str);
+return 1;
+}
+
+devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+cmdline_maps = true;
+i = early_ioapic_map_size++;
+early_ioapic_map[i].id = id;
+early_ioapic_map[i].devid = devid;
+early_ioapic_map[i].cmd_line = true;
+
+return 1;
+}
+
+static int __init parse_ivrs_hpet(char *str)
+{
+unsigned int bus, dev, fn;
+int ret, id, i;
+u16 devid;
+
+ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
+
+if (ret != 4) {
+pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
+return 1;
+}
+
+if (early_hpet_map_size == EARLY_MAP_SIZE) {
+pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
+str);
+return 1;
+}
+
+devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+
+cmdline_maps = true;
+i = early_hpet_map_size++;
+early_hpet_map[i].id = id;
+early_hpet_map[i].devid = devid;
+early_hpet_map[i].cmd_line = true;
+
+return 1;
+}
+
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
+__setup("amd_iommu=", parse_amd_iommu_options);
+__setup("ivrs_ioapic", parse_ivrs_ioapic);
+__setup("ivrs_hpet", parse_ivrs_hpet);
 
 IOMMU_INIT_FINISH(amd_iommu_detect,
 gart_iommu_hole_init,
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index ec36cf63e0ca..0285a215df16 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -100,6 +100,7 @@
 #define PASID_MASK 0x000fffff
 
 /* MMIO status bits */
+#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
 #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
 #define MMIO_STATUS_PPR_INT_MASK (1 << 6)
 
@@ -589,6 +590,7 @@ struct devid_map {
 struct list_head list;
 u8 id;
 u16 devid;
+bool cmd_line;
 };
 
 /* Map HPET and IOAPIC ids to the devid used by the IOMMU */
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index b8008f679bc7..a7967ceb79e6 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -646,7 +646,7 @@ out:
 int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 struct intel_iommu *iommu;
-u32 ver;
+u32 ver, sts;
 static int iommu_allocated = 0;
 int agaw = 0;
 int msagaw = 0;
@@ -696,6 +696,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 (unsigned long long)iommu->cap,
 (unsigned long long)iommu->ecap);
 
+/* Reflect status in gcmd */
+sts = readl(iommu->reg + DMAR_GSTS_REG);
+if (sts & DMA_GSTS_IRES)
+iommu->gcmd |= DMA_GCMD_IRE;
+if (sts & DMA_GSTS_TES)
+iommu->gcmd |= DMA_GCMD_TE;
+if (sts & DMA_GSTS_QIES)
+iommu->gcmd |= DMA_GCMD_QIE;
+
 raw_spin_lock_init(&iommu->register_lock);
 
 drhd->iommu = iommu;
@@ -1205,7 +1214,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 
 /* TBD: ignore advanced fault log currently */
 if (!(fault_status & DMA_FSTS_PPF))
-goto clear_rest;
+goto unlock_exit;
 
 fault_index = dma_fsts_fault_record_index(fault_status);
 reg = cap_fault_reg_offset(iommu->cap);
@@ -1246,11 +1255,10 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 fault_index = 0;
 raw_spin_lock_irqsave(&iommu->register_lock, flag);
 }
-clear_rest:
-/* clear all the other faults */
-fault_status = readl(iommu->reg + DMAR_FSTS_REG);
-writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 
+writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+
+unlock_exit:
 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 return IRQ_HANDLED;
 }
@@ -1298,6 +1306,7 @@ int __init enable_drhd_fault_handling(void)
 for_each_drhd_unit(drhd) {
 int ret;
 struct intel_iommu *iommu = drhd->iommu;
+u32 fault_status;
 ret = dmar_set_interrupt(iommu);
 
 if (ret) {
@@ -1310,6 +1319,8 @@ int __init enable_drhd_fault_handling(void)
  * Clear any previous faults.
  */
 dmar_fault(iommu->irq, iommu);
+fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+writel(fault_status, iommu->reg + DMAR_FSTS_REG);
 }
 
 return 0;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 238a3caa949a..3f32d64ab87a 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1027,7 +1027,7 @@ done:
 }
 
 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long iova)
+dma_addr_t iova)
 {
 struct exynos_iommu_domain *priv = domain->priv;
 unsigned long *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0099667a397e..b4f0e28dfa41 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -47,6 +47,7 @@
 #include <asm/iommu.h>
 
 #include "irq_remapping.h"
+#include "pci.h"
 
 #define ROOT_SIZE VTD_PAGE_SIZE
 #define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -3665,6 +3666,7 @@ static struct notifier_block device_nb = {
 int __init intel_iommu_init(void)
 {
 int ret = 0;
+struct dmar_drhd_unit *drhd;
 
 /* VT-d is required for a TXT/tboot launch, so enforce that */
 force_on = tboot_force_iommu();
@@ -3675,6 +3677,20 @@ int __init intel_iommu_init(void)
 return -ENODEV;
 }
 
+/*
+ * Disable translation if already enabled prior to OS handover.
+ */
+for_each_drhd_unit(drhd) {
+struct intel_iommu *iommu;
+
+if (drhd->ignored)
+continue;
+
+iommu = drhd->iommu;
+if (iommu->gcmd & DMA_GCMD_TE)
+iommu_disable_translation(iommu);
+}
+
 if (dmar_dev_scope_init() < 0) {
 if (force_on)
 panic("tboot: Failed to initialize DMAR device scope\n");
@@ -4111,7 +4127,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long iova)
+dma_addr_t iova)
 {
 struct dmar_domain *dmar_domain = domain->priv;
 struct dma_pte *pte;
@@ -4137,12 +4153,6 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
 return 0;
 }
 
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
-pci_dev_put(*from);
-*from = to;
-}
-
 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
 static int intel_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f3b8f23b5d8f..5b19b2d6ec2d 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -524,6 +524,16 @@ static int __init intel_irq_remapping_supported(void)
 
 if (disable_irq_remap)
 return 0;
+if (irq_remap_broken) {
+WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+"This system BIOS has enabled interrupt remapping\n"
+"on a chipset that contains an erratum making that\n"
+"feature unstable. To maintain system stability\n"
+"interrupt remapping is being disabled. Please\n"
+"contact your BIOS vendor for an update\n");
+disable_irq_remap = 1;
+return 0;
+}
 
 if (!dmar_ir_support())
 return 0;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b972d430d92b..d8f98b14e2fe 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -204,6 +204,35 @@ again:
 }
 EXPORT_SYMBOL_GPL(iommu_group_alloc);
 
+struct iommu_group *iommu_group_get_by_id(int id)
+{
+struct kobject *group_kobj;
+struct iommu_group *group;
+const char *name;
+
+if (!iommu_group_kset)
+return NULL;
+
+name = kasprintf(GFP_KERNEL, "%d", id);
+if (!name)
+return NULL;
+
+group_kobj = kset_find_obj(iommu_group_kset, name);
+kfree(name);
+
+if (!group_kobj)
+return NULL;
+
+group = container_of(group_kobj, struct iommu_group, kobj);
+BUG_ON(group->id != id);
+
+kobject_get(group->devices_kobj);
+kobject_put(&group->kobj);
+
+return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
+
 /**
  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
  * @group: the group
@@ -706,8 +735,7 @@ void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_detach_group);
 
-phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long iova)
+phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 {
 if (unlikely(domain->ops->iova_to_phys == NULL))
 return 0;
@@ -854,12 +882,13 @@ EXPORT_SYMBOL_GPL(iommu_unmap);
 
 
 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
-phys_addr_t paddr, u64 size)
+phys_addr_t paddr, u64 size, int prot)
 {
 if (unlikely(domain->ops->domain_window_enable == NULL))
 return -ENODEV;
 
-return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
+return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
+prot);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
 
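The iommu_group_get_by_id() helper added above returns the group with a reference held on its devices kobject (or NULL if no such group exists), so a caller is expected to drop that reference with iommu_group_put(). A minimal, hypothetical caller sketch - not part of this patch set - could look like:

    struct iommu_group *group = iommu_group_get_by_id(id);

    if (group) {
            /* ... use the group, e.g. attach it to an iommu_domain ... */
            iommu_group_put(group);    /* drop the devices_kobj reference */
    }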
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 7c11ff368d07..dcfea4e39be7 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -18,6 +18,7 @@
 int irq_remapping_enabled;
 
 int disable_irq_remap;
+int irq_remap_broken;
 int disable_sourceid_checking;
 int no_x2apic_optout;
 
@@ -210,6 +211,11 @@ void __init setup_irq_remapping_ops(void)
 #endif
 }
 
+void set_irq_remapping_broken(void)
+{
+irq_remap_broken = 1;
+}
+
 int irq_remapping_supported(void)
 {
 if (disable_irq_remap)
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index ecb637670405..90c4dae5a46b 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -32,6 +32,7 @@ struct pci_dev;
 struct msi_msg;
 
 extern int disable_irq_remap;
+extern int irq_remap_broken;
 extern int disable_sourceid_checking;
 extern int no_x2apic_optout;
 extern int irq_remapping_enabled;
@@ -89,6 +90,7 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
 
 #define irq_remapping_enabled 0
 #define disable_irq_remap 1
+#define irq_remap_broken 0
 
 #endif /* CONFIG_IRQ_REMAP */
 
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a8870a31668..8ab4f41090af 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -554,7 +554,7 @@ fail:
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long va)
+dma_addr_t va)
 {
 struct msm_priv *priv;
 struct msm_iommu_drvdata *iommu_drvdata;
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6ac02fa5910f..e02e5d71745b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1219,7 +1219,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
 }
 
 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long da)
+dma_addr_t da)
 {
 struct omap_iommu_domain *omap_domain = domain->priv;
 struct omap_iommu *oiommu = omap_domain->iommu_dev;
diff --git a/drivers/iommu/pci.h b/drivers/iommu/pci.h
new file mode 100644
index 000000000000..352d80ae7443
--- /dev/null
+++ b/drivers/iommu/pci.h
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ */
+#ifndef __IOMMU_PCI_H
+#define __IOMMU_PCI_H
+
+/* Helper function for swapping pci device reference */
+static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+pci_dev_put(*from);
+*from = to;
+}
+
+#endif /* __IOMMU_PCI_H */
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index b6e8b57cf0a8..d572863dfccd 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -296,7 +296,7 @@ done:
 }
 
 static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long iova)
+dma_addr_t iova)
 {
 struct shmobile_iommu_domain *sh_domain = domain->priv;
 uint32_t l1entry = 0, l2entry = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 86437575f94d..108c0e9c24d9 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -279,7 +279,7 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 }
 
 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long iova)
+dma_addr_t iova)
 {
 struct gart_device *gart = domain->priv;
 unsigned long pte;
@@ -295,7 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 
 pa = (pte & GART_PAGE_MASK);
 if (!pfn_valid(__phys_to_pfn(pa))) {
-dev_err(gart->dev, "No entry for %08lx:%08x\n", iova, pa);
+dev_err(gart->dev, "No entry for %08llx:%08x\n",
+(unsigned long long)iova, pa);
 gart_dump_table(gart);
 return -EINVAL;
 }
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index b34e5fd7fd9e..f6f120e25409 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -757,7 +757,7 @@ static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 }
 
 static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
-unsigned long iova)
+dma_addr_t iova)
 {
 struct smmu_as *as = domain->priv;
 unsigned long *pte;
@@ -772,7 +772,8 @@ static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
 pfn = *pte & SMMU_PFN_MASK;
 WARN_ON(!pfn_valid(pfn));
 dev_dbg(as->smmu->dev,
-"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);
+"iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova,
+pfn, as->asid);
 
 spin_unlock_irqrestore(&as->lock, flags);
 return PFN_PHYS(pfn);