path: root/drivers/pci/intr_remapping.c
author		Grant Likely <grant.likely@secretlab.ca>	2010-12-30 00:20:30 -0500
committer	Grant Likely <grant.likely@secretlab.ca>	2010-12-30 00:21:47 -0500
commit		d392da5207352f09030e95d9ea335a4225667ec0 (patch)
tree		7d6cd1932afcad0a5619a5c504a6d93ca318187c /drivers/pci/intr_remapping.c
parent		e39d5ef678045d61812c1401f04fe8edb14d6359 (diff)
parent		387c31c7e5c9805b0aef8833d1731a5fe7bdea14 (diff)
Merge v2.6.37-rc8 into powerpc/next
Diffstat (limited to 'drivers/pci/intr_remapping.c')
-rw-r--r--	drivers/pci/intr_remapping.c	222

1 file changed, 42 insertions(+), 180 deletions(-)
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 1694a0e2845b..ec87cd66f3eb 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -21,6 +21,8 @@ static int ir_ioapic_num, ir_hpet_num;
 int intr_remapping_enabled;
 
 static int disable_intremap;
+static int disable_sourceid_checking;
+
 static __init int setup_nointremap(char *str)
 {
 	disable_intremap = 1;
@@ -28,109 +30,40 @@ static __init int setup_nointremap(char *str)
 }
 early_param("nointremap", setup_nointremap);
 
-struct irq_2_iommu {
-	struct intel_iommu *iommu;
-	u16 irte_index;
-	u16 sub_handle;
-	u8 irte_mask;
-};
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
+static __init int setup_intremap(char *str)
 {
-	struct irq_2_iommu *iommu;
-
-	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
+	if (!str)
+		return -EINVAL;
 
-	return iommu;
-}
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-
-	if (WARN_ON_ONCE(!desc))
-		return NULL;
-
-	return desc->irq_2_iommu;
-}
-
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	struct irq_desc *desc;
-	struct irq_2_iommu *irq_iommu;
+	if (!strncmp(str, "on", 2))
+		disable_intremap = 0;
+	else if (!strncmp(str, "off", 3))
+		disable_intremap = 1;
+	else if (!strncmp(str, "nosid", 5))
+		disable_sourceid_checking = 1;
 
-	desc = irq_to_desc(irq);
-	if (!desc) {
-		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
-		return NULL;
-	}
-
-	irq_iommu = desc->irq_2_iommu;
-
-	if (!irq_iommu)
-		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
-
-	return desc->irq_2_iommu;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	if (irq < nr_irqs)
-		return &irq_2_iommuX[irq];
-
-	return NULL;
-}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	return irq_2_iommu(irq);
+	return 0;
 }
-#endif
+early_param("intremap", setup_intremap);
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
-static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
-{
-	struct irq_2_iommu *irq_iommu;
-
-	irq_iommu = irq_2_iommu(irq);
-
-	if (!irq_iommu)
-		return NULL;
-
-	if (!irq_iommu->iommu)
-		return NULL;
-
-	return irq_iommu;
-}
-
-int irq_remapped(int irq)
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	return valid_irq_2_iommu(irq) != NULL;
+	struct irq_cfg *cfg = get_irq_chip_data(irq);
+	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	if (!entry)
+	if (!entry || !irq_iommu)
 		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
@@ -142,21 +75,15 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
 	int i;
 
-	if (!count)
+	if (!count || !irq_iommu)
 		return -1;
 
-#ifndef CONFIG_SPARSE_IRQ
-	/* protect irq_2_iommu_alloc later */
-	if (irq >= nr_irqs)
-		return -1;
-#endif
-
 	/*
 	 * start the IRTE search from index 0.
 	 */
@@ -196,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_iommu = irq_2_iommu_alloc(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
-		return -1;
-	}
-
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = 0;
@@ -226,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
 
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
 	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -245,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	irq_iommu = irq_2_iommu_alloc(irq);
-
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
@@ -268,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	return 0;
 }
 
-int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
-{
-	struct irq_2_iommu *irq_iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	irq_iommu->iommu = NULL;
-	irq_iommu->irte_index = 0;
-	irq_iommu->sub_handle = 0;
-	irq_2_iommu(irq)->irte_mask = 0;
-
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return 0;
-}
-
 int modify_irte(int irq, struct irte *irte_modified)
 {
-	int rc;
-	int index;
-	struct irte *irte;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
 	unsigned long flags;
+	struct irte *irte;
+	int rc, index;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	iommu = irq_iommu->iommu;
 
@@ -321,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified)
 	return rc;
 }
 
-int flush_irte(int irq)
-{
-	int rc;
-	int index;
-	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	iommu = irq_iommu->iommu;
-
-	index = irq_iommu->irte_index + irq_iommu->sub_handle;
-
-	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return rc;
-}
-
 struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 {
 	int i;
@@ -402,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 
 int free_irte(int irq)
 {
-	int rc = 0;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int rc;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	rc = clear_entries(irq_iommu);
 
@@ -453,6 +313,8 @@ int free_irte(int irq)
 static void set_irte_sid(struct irte *irte, unsigned int svt,
 			 unsigned int sq, unsigned int sid)
 {
+	if (disable_sourceid_checking)
+		svt = SVT_NO_VERIFY;
 	irte->svt = svt;
 	irte->sq = sq;
 	irte->sid = sid;
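
The two functional changes merged here are visible in the hunks above: the per-IRQ remapping state (struct irq_2_iommu) is no longer allocated on demand but is embedded in struct irq_cfg and reached via get_irq_chip_data(), so every entry point now does an up-front NULL check instead of calling irq_2_iommu_alloc(); and a new "intremap=" early parameter accepts "on", "off", and "nosid", where "nosid" keeps remapping enabled but makes set_irte_sid() force svt to SVT_NO_VERIFY, skipping source-id verification. A minimal sketch of the resulting caller pattern, for reference only (the helper below is hypothetical and locking is elided, whereas the real functions take irq_2_ir_lock):

	/* Sketch only: resolve this IRQ's remapping state and fail fast. */
	static int example_irte_index(int irq)
	{
		/* NULL when no irq_cfg chip data is attached to this IRQ */
		struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);

		if (!irq_iommu)
			return -1;

		/* effective IRTE slot, as computed in get_irte() above */
		return irq_iommu->irte_index + irq_iommu->sub_handle;
	}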