Diffstat (limited to 'drivers/pci')
 drivers/pci/dmar.c                 |  14
 drivers/pci/hotplug/cpqphp_sysfs.c |  13
 drivers/pci/htirq.c                |  22
 drivers/pci/intel-iommu.c          | 117
 drivers/pci/intr_remapping.c       | 212
 drivers/pci/iov.c                  |   2
 drivers/pci/msi.c                  |  38
 drivers/pci/pci.h                  |   5
 drivers/pci/pcie/aer/aer_inject.c  |   1
 drivers/pci/quirks.c               |  20
 10 files changed, 162 insertions(+), 282 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 0a19708074c2..0157708d474d 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -36,6 +36,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
+#include <asm/iommu_table.h>
 
 #define PREFIX "DMAR: "
 
@@ -687,7 +688,7 @@ failed:
 	return 0;
 }
 
-void __init detect_intel_iommu(void)
+int __init detect_intel_iommu(void)
 {
 	int ret;
 
@@ -723,6 +724,8 @@ void __init detect_intel_iommu(void)
 	}
 	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
+
+	return ret ? 1 : -ENODEV;
 }
 
 
@@ -1221,9 +1224,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 	}
 }
 
-void dmar_msi_unmask(unsigned int irq)
+void dmar_msi_unmask(struct irq_data *data)
 {
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_data_get_irq_data(data);
 	unsigned long flag;
 
 	/* unmask it */
@@ -1234,10 +1237,10 @@ void dmar_msi_unmask(unsigned int irq)
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
-void dmar_msi_mask(unsigned int irq)
+void dmar_msi_mask(struct irq_data *data)
 {
 	unsigned long flag;
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_data_get_irq_data(data);
 
 	/* mask it */
 	spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1455,3 +1458,4 @@ int __init dmar_ir_support(void)
 		return 0;
 	return dmar->flags & 0x1;
 }
+IOMMU_INIT_POST(detect_intel_iommu);
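
With detect_intel_iommu() now returning an int, VT-d detection can be registered in the x86 IOMMU initialization table via IOMMU_INIT_POST instead of being called explicitly. A rough sketch of how such a linker-built table is typically walked — the entry layout and walker below are illustrative assumptions, not the actual asm/iommu_table.h contents:

    /* Illustrative sketch only: field names are assumptions, not the
     * real struct iommu_table_entry from asm/iommu_table.h. */
    struct iommu_table_entry {
        int (*detect)(void);        /* > 0 when hardware is present */
        void (*early_init)(void);   /* run afterwards for detected units */
    };

    static void walk_iommu_table(struct iommu_table_entry *start,
                                 struct iommu_table_entry *finish)
    {
        struct iommu_table_entry *p;

        for (p = start; p < finish; p++)
            if (p->detect() > 0 && p->early_init)
                p->early_init();   /* detect_intel_iommu() returns 1 here */
    }
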
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 56215322930a..4cb30447a486 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -34,10 +34,11 @@
 #include <linux/workqueue.h>
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
 #include <linux/debugfs.h>
 #include "cpqphp.h"
 
+static DEFINE_MUTEX(cpqphp_mutex);
 static int show_ctrl (struct controller *ctrl, char *buf)
 {
 	char *out = buf;
@@ -147,7 +148,7 @@ static int open(struct inode *inode, struct file *file)
 	struct ctrl_dbg *dbg;
 	int retval = -ENOMEM;
 
-	lock_kernel();
+	mutex_lock(&cpqphp_mutex);
 	dbg = kmalloc(sizeof(*dbg), GFP_KERNEL);
 	if (!dbg)
 		goto exit;
@@ -160,7 +161,7 @@ static int open(struct inode *inode, struct file *file)
 	file->private_data = dbg;
 	retval = 0;
 exit:
-	unlock_kernel();
+	mutex_unlock(&cpqphp_mutex);
 	return retval;
 }
 
@@ -169,7 +170,7 @@ static loff_t lseek(struct file *file, loff_t off, int whence)
 	struct ctrl_dbg *dbg;
 	loff_t new = -1;
 
-	lock_kernel();
+	mutex_lock(&cpqphp_mutex);
 	dbg = file->private_data;
 
 	switch (whence) {
@@ -181,10 +182,10 @@ static loff_t lseek(struct file *file, loff_t off, int whence)
 		break;
 	}
 	if (new < 0 || new > dbg->size) {
-		unlock_kernel();
+		mutex_unlock(&cpqphp_mutex);
 		return -EINVAL;
 	}
-	unlock_kernel();
+	mutex_unlock(&cpqphp_mutex);
 	return (file->f_pos = new);
 }
 
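
This file follows the standard BKL-pushdown pattern: every lock_kernel()/unlock_kernel() pair is replaced one-for-one by a driver-local mutex with the same scope. A minimal sketch of the idiom, with hypothetical names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(foo_mutex);    /* hypothetical driver-local lock */

    static int foo_open(struct inode *inode, struct file *file)
    {
        int retval = -ENOMEM;

        mutex_lock(&foo_mutex);        /* was: lock_kernel() */
        /* ... allocate per-file state, set file->private_data ... */
        retval = 0;
        mutex_unlock(&foo_mutex);      /* was: unlock_kernel() */
        return retval;
    }
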
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 98abf8b91294..834842aa5bbf 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 	*msg = cfg->msg;
 }
 
-void mask_ht_irq(unsigned int irq)
+void mask_ht_irq(struct irq_data *data)
 {
-	struct ht_irq_cfg *cfg;
-	struct ht_irq_msg msg;
-
-	cfg = get_irq_data(irq);
+	struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+	struct ht_irq_msg msg = cfg->msg;
 
-	msg = cfg->msg;
 	msg.address_lo |= 1;
-	write_ht_irq_msg(irq, &msg);
+	write_ht_irq_msg(data->irq, &msg);
 }
 
-void unmask_ht_irq(unsigned int irq)
+void unmask_ht_irq(struct irq_data *data)
 {
-	struct ht_irq_cfg *cfg;
-	struct ht_irq_msg msg;
-
-	cfg = get_irq_data(irq);
+	struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+	struct ht_irq_msg msg = cfg->msg;
 
-	msg = cfg->msg;
 	msg.address_lo &= ~1;
-	write_ht_irq_msg(irq, &msg);
+	write_ht_irq_msg(data->irq, &msg);
 }
 
 /**
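
These handlers now receive the struct irq_data directly, so they can be wired straight into the genirq chip callbacks introduced by this series. A sketch of the wiring on the x86 side — the initializer is illustrative of the 2.6.37-era API, not copied from io_apic.c:

    static struct irq_chip ht_irq_chip = {
        .name       = "PCI-HT",
        .irq_mask   = mask_ht_irq,      /* void (*)(struct irq_data *) */
        .irq_unmask = unmask_ht_irq,
        /* .irq_ack, .irq_set_affinity, ... elided */
    };
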
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb5be84..4789f8e8bf7a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
 #define DMA_32BIT_PFN	IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN	IOVA_PFN(DMA_BIT_MASK(64))
 
+/* page table handling */
+#define LEVEL_STRIDE		(9)
+#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+	return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+	return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+	return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+	return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+	return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+	return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+	return (pfn + level_size(level) - 1) & level_mask(level);
+}
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
 }
 
 
-static inline int width_to_agaw(int width);
-
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
 	unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-/* page table handling */
-#define LEVEL_STRIDE		(9)
-#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-	return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-	return 30 + agaw * LEVEL_STRIDE;
-
-}
-
-static inline int width_to_agaw(int width)
-{
-	return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-	return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-	return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-	return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-	return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 				      unsigned long pfn)
 {
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
 
+#define GGC 0x52
+#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
+#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
+#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
+#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
+#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
+#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
+#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
+#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)
+
+static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
+{
+	unsigned short ggc;
+
+	if (pci_read_config_word(dev, GGC, &ggc))
+		return;
+
+	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
+		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+		dmar_map_gfx = 0;
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
+
 /* On Tylersburg chipsets, some BIOSes have been known to enable the
    ISOCH DMAR unit for the Azalia sound device, but not give it any
    TLB entries, which causes it to deadlock. Check for that. We do
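
The page-table helpers that moved to the top of the file encode the VT-d geometry: each level decodes LEVEL_STRIDE = 9 bits of the page frame number, and the adjusted guest address width (agaw) fixes the table depth. A small standalone check of that arithmetic, assuming the formulas in the hunk above:

    #include <assert.h>

    #define LEVEL_STRIDE 9
    #define LEVEL_MASK (((unsigned long long)1 << LEVEL_STRIDE) - 1)

    int main(void)
    {
        /* agaw 1 -> three-level table covering 30 + 1*9 = 39 address bits */
        assert(1 + 2 == 3);                     /* agaw_to_level(1) */
        assert(30 + 1 * LEVEL_STRIDE == 39);    /* agaw_to_width(1) */
        /* level 2 decodes pfn bits [17:9]: 0x12345 >> 9 == 0x91 */
        assert(((0x12345UL >> LEVEL_STRIDE) & LEVEL_MASK) == 0x91);
        return 0;
    }
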
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index fd1d2867cdcc..ec87cd66f3eb 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -46,109 +46,24 @@ static __init int setup_intremap(char *str)
 }
 early_param("intremap", setup_intremap);
 
-struct irq_2_iommu {
-	struct intel_iommu *iommu;
-	u16 irte_index;
-	u16 sub_handle;
-	u8 irte_mask;
-};
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
-{
-	struct irq_2_iommu *iommu;
-
-	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
-
-	return iommu;
-}
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-
-	if (WARN_ON_ONCE(!desc))
-		return NULL;
-
-	return desc->irq_2_iommu;
-}
-
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	struct irq_desc *desc;
-	struct irq_2_iommu *irq_iommu;
-
-	desc = irq_to_desc(irq);
-	if (!desc) {
-		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
-		return NULL;
-	}
-
-	irq_iommu = desc->irq_2_iommu;
-
-	if (!irq_iommu)
-		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
-
-	return desc->irq_2_iommu;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	if (irq < nr_irqs)
-		return &irq_2_iommuX[irq];
-
-	return NULL;
-}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	return irq_2_iommu(irq);
-}
-#endif
-
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
-static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
-{
-	struct irq_2_iommu *irq_iommu;
-
-	irq_iommu = irq_2_iommu(irq);
-
-	if (!irq_iommu)
-		return NULL;
-
-	if (!irq_iommu->iommu)
-		return NULL;
-
-	return irq_iommu;
-}
-
-int irq_remapped(int irq)
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	return valid_irq_2_iommu(irq) != NULL;
+	struct irq_cfg *cfg = get_irq_chip_data(irq);
+	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	if (!entry)
+	if (!entry || !irq_iommu)
 		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
@@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
 	int i;
 
-	if (!count)
-		return -1;
-
-#ifndef CONFIG_SPARSE_IRQ
-	/* protect irq_2_iommu_alloc later */
-	if (irq >= nr_irqs)
+	if (!count || !irq_iommu)
 		return -1;
-#endif
 
 	/*
 	 * start the IRTE search from index 0.
@@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_iommu = irq_2_iommu_alloc(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
-		return -1;
-	}
-
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = 0;
@@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
 
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
 	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	irq_iommu = irq_2_iommu_alloc(irq);
-
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
@@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	return 0;
 }
 
-int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
-{
-	struct irq_2_iommu *irq_iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	irq_iommu->iommu = NULL;
-	irq_iommu->irte_index = 0;
-	irq_iommu->sub_handle = 0;
-	irq_2_iommu(irq)->irte_mask = 0;
-
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return 0;
-}
-
 int modify_irte(int irq, struct irte *irte_modified)
 {
-	int rc;
-	int index;
-	struct irte *irte;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
 	unsigned long flags;
+	struct irte *irte;
+	int rc, index;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	iommu = irq_iommu->iommu;
 
@@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified)
 	return rc;
 }
 
-int flush_irte(int irq)
-{
-	int rc;
-	int index;
-	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	iommu = irq_iommu->iommu;
-
-	index = irq_iommu->irte_index + irq_iommu->sub_handle;
-
-	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return rc;
-}
-
 struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 {
 	int i;
@@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 
 int free_irte(int irq)
 {
-	int rc = 0;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int rc;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	rc = clear_entries(irq_iommu);
 
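
The allocation paths disappear because the companion x86 patch embeds the remapping state in the per-interrupt irq_cfg, reachable through get_irq_chip_data(). Roughly — the layout is illustrative, the real irq_cfg lives in arch/x86:

    /* Illustrative layout: irq_2_iommu is now embedded rather than
     * kzalloc'd per interrupt, so lookup cannot fail with -ENOMEM. */
    struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8 irte_mask;
    };

    struct irq_cfg {
        /* ... vector and domain fields elided ... */
        struct irq_2_iommu irq_2_iommu;    /* under CONFIG_INTR_REMAP */
    };
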
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d9..553d8ee55c1c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
  * the VF BAR size multiplied by the number of VFs. The alignment
  * is just the VF BAR size.
  */
-int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
 {
 	struct resource tmp;
 	enum pci_bar_type type;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 69b7be33b3a2..5fcf5aec680f 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 	desc->masked = __msix_mask_irq(desc, flag);
 }
 
-static void msi_set_mask_bit(unsigned irq, u32 flag)
+static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 {
-	struct msi_desc *desc = get_irq_msi(irq);
+	struct msi_desc *desc = irq_data_get_msi(data);
 
 	if (desc->msi_attrib.is_msix) {
 		msix_mask_irq(desc, flag);
 		readl(desc->mask_base);	/* Flush write to device */
 	} else {
-		unsigned offset = irq - desc->dev->irq;
+		unsigned offset = data->irq - desc->dev->irq;
 		msi_mask_irq(desc, 1 << offset, flag << offset);
 	}
 }
 
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(struct irq_data *data)
 {
-	msi_set_mask_bit(irq, 1);
+	msi_set_mask_bit(data, 1);
 }
 
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(struct irq_data *data)
 {
-	msi_set_mask_bit(irq, 0);
+	msi_set_mask_bit(data, 0);
 }
 
-void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_desc_msi(desc);
-
 	BUG_ON(entry->dev->current_state != PCI_D0);
 
 	if (entry->msi_attrib.is_msix) {
@@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 
 void read_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct msi_desc *entry = get_irq_msi(irq);
 
-	read_msi_msg_desc(desc, msg);
+	__read_msi_msg(entry, msg);
 }
 
-void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_desc_msi(desc);
-
 	/* Assert that the cache is valid, assuming that
 	 * valid messages are not all-zeroes. */
 	BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
@@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct msi_desc *entry = get_irq_msi(irq);
 
-	get_cached_msi_msg_desc(desc, msg);
+	__get_cached_msi_msg(entry, msg);
 }
 
-void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_desc_msi(desc);
-
 	if (entry->dev->current_state != PCI_D0) {
 		/* Don't touch the hardware now */
 	} else if (entry->msi_attrib.is_msix) {
@@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 
 void write_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct msi_desc *entry = get_irq_msi(irq);
 
-	write_msi_msg_desc(desc, msg);
+	__write_msi_msg(entry, msg);
 }
 
 static void free_msi_irqs(struct pci_dev *dev)
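
The renamed helpers split the API by what the caller already holds: code that has a struct msi_desc (typically an irq_chip callback) uses the double-underscore variants, while the irq-number entry points do one lookup and delegate. A hypothetical caller, for illustration only:

    /* Hypothetical caller: resolve the msi_desc once, then reuse it. */
    static void example_update_msg(unsigned int irq, struct msi_msg *msg)
    {
        struct msi_desc *entry = get_irq_msi(irq);   /* single lookup */

        __get_cached_msi_msg(entry, msg);   /* no per-call irq_desc walk */
        msg->data |= 1;                     /* illustrative edit */
        __write_msi_msg(entry, msg);
    }
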
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7754a678ab15..6beb11b617a9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
 				enum pci_bar_type *type);
-extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+						    int resno);
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_IOV */
 
-static inline int pci_resource_alignment(struct pci_dev *dev,
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
 					 struct resource *res)
 {
 #ifdef CONFIG_PCI_IOV
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 909924692b8a..b3cf6223f63a 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -472,6 +472,7 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
 static const struct file_operations aer_inject_fops = {
 	.write = aer_inject_write,
 	.owner = THIS_MODULE,
+	.llseek = noop_llseek,
 };
 
 static struct miscdevice aer_inject_device = {
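
With the BKL gone from the default llseek path, file_operations that never seek must opt in to a seek behavior explicitly; noop_llseek reports success without moving the file position. From memory of fs/read_write.c, it is essentially:

    loff_t noop_llseek(struct file *file, loff_t offset, int whence)
    {
        return file->f_pos;    /* succeed, but leave the position alone */
    }
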
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 89ed181cd90c..857ae01734a6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
 
 /*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+	u32 pmbase;
+	u16 pm1a;
+
+	pci_read_config_dword(dev, 0x40, &pmbase);
+	pmbase = pmbase & 0xff80;
+	pm1a = inw(pmbase);
+
+	if (pm1a & 0x10) {
+		dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+		outw(0x10, pmbase);
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
+/*
  * Chipsets where PCI->PCI transfers vanish or hang
  */
 static void __devinit quirk_nopcipci(struct pci_dev *dev)