author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 16:22:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 16:23:01 -0400
commit		9301975ec251bab1ad7cfcb84a688b26187e4e4a (patch)
tree		91e48be0bdc67cbcb75bc8a299a3dcf168e0a814 /drivers/pci
parent		7110879cf2afbfb7af79675f5ff109e63d631c25 (diff)
parent		dd3a1db900f2a215a7d7dd71b836e149a6cf5fed (diff)
Merge branch 'genirq-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
This merges branches irq/genirq, irq/sparseirq-v4, timers/hpet-percpu and
x86/uv.

The sparseirq branch is just preliminary groundwork: no sparse IRQs are
actually implemented by this tree anymore - just the new APIs are added
while keeping the old way intact as well (the new APIs map 1:1 to
irq_desc[]). The 'real' sparse IRQ support will then be a relatively small
patch ontop of this - with a v2.6.29 merge target.

* 'genirq-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (178 commits)
  genirq: improve include files
  intr_remapping: fix typo
  io_apic: make irq_mis_count available on 64-bit too
  genirq: fix name space collisions of nr_irqs in arch/*
  genirq: fix name space collision of nr_irqs in autoprobe.c
  genirq: use iterators for irq_desc loops
  proc: fixup irq iterator
  genirq: add reverse iterator for irq_desc
  x86: move ack_bad_irq() to irq.c
  x86: unify show_interrupts() and proc helpers
  x86: cleanup show_interrupts
  genirq: cleanup the sparseirq modifications
  genirq: remove artifacts from sparseirq removal
  genirq: revert dynarray
  genirq: remove irq_to_desc_alloc
  genirq: remove sparse irq code
  genirq: use inline function for irq_to_desc
  genirq: consolidate nr_irqs and for_each_irq_desc()
  x86: remove sparse irq from Kconfig
  genirq: define nr_irqs for architectures with GENERIC_HARDIRQS=n
  ...
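[ For illustration, a minimal sketch of what "the new APIs map 1:1 to
  irq_desc[]" means in practice. This is not code taken from the tree,
  just the shape of the irq_to_desc() conversion named in the shortlog:
  callers stop indexing irq_desc[] directly and go through a lookup
  helper, so a later sparse implementation only has to replace the
  helper body.

	/* flat backing store, exactly as before */
	extern struct irq_desc irq_desc[NR_IRQS];
	extern int nr_irqs;

	/* new-style API, mapping 1:1 onto the array for now; a sparse
	 * implementation can swap in per-IRQ allocation behind this call */
	static inline struct irq_desc *irq_to_desc(unsigned int irq)
	{
		return (irq < nr_irqs) ? irq_desc + irq : NULL;
	}
]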
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/dmar.c		61
-rw-r--r--	drivers/pci/htirq.c		3
-rw-r--r--	drivers/pci/intr_remapping.c	139
3 files changed, 126 insertions(+), 77 deletions(-)
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index e842e756308a..8b29c307f1a1 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -193,7 +193,7 @@ dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
 	static int include_all;
-	int ret;
+	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
@@ -212,7 +212,7 @@ dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 		include_all = 1;
 	}
 
-	if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) {
+	if (ret) {
 		list_del(&dmaru->list);
 		kfree(dmaru);
 	}
@@ -289,6 +289,24 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
 	}
 }
 
+/**
+ * dmar_table_detect - checks to see if the platform supports DMAR devices
+ */
+static int __init dmar_table_detect(void)
+{
+	acpi_status status = AE_OK;
+
+	/* if we could find DMAR table, then there are DMAR devices */
+	status = acpi_get_table(ACPI_SIG_DMAR, 0,
+				(struct acpi_table_header **)&dmar_tbl);
+
+	if (ACPI_SUCCESS(status) && !dmar_tbl) {
+		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+		status = AE_NOT_FOUND;
+	}
+
+	return (ACPI_SUCCESS(status) ? 1 : 0);
+}
 
 /**
  * parse_dmar_table - parses the DMA reporting table
@@ -300,6 +318,12 @@ parse_dmar_table(void)
 	struct acpi_dmar_header *entry_header;
 	int ret = 0;
 
+	/*
+	 * Do it again, earlier dmar_tbl mapping could be mapped with
+	 * fixed map.
+	 */
+	dmar_table_detect();
+
 	dmar = (struct acpi_table_dmar *)dmar_tbl;
 	if (!dmar)
 		return -ENODEV;
@@ -373,10 +397,10 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 
 int __init dmar_dev_scope_init(void)
 {
-	struct dmar_drhd_unit *drhd;
+	struct dmar_drhd_unit *drhd, *drhd_n;
 	int ret = -ENODEV;
 
-	for_each_drhd_unit(drhd) {
+	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
 		ret = dmar_parse_dev(drhd);
 		if (ret)
 			return ret;
@@ -384,8 +408,8 @@ int __init dmar_dev_scope_init(void)
 
 #ifdef CONFIG_DMAR
 	{
-		struct dmar_rmrr_unit *rmrr;
-		for_each_rmrr_units(rmrr) {
+		struct dmar_rmrr_unit *rmrr, *rmrr_n;
+		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
 			ret = rmrr_parse_dev(rmrr);
 			if (ret)
 				return ret;
@@ -430,30 +454,11 @@ int __init dmar_table_init(void)
 	return 0;
 }
 
-/**
- * early_dmar_detect - checks to see if the platform supports DMAR devices
- */
-int __init early_dmar_detect(void)
-{
-	acpi_status status = AE_OK;
-
-	/* if we could find DMAR table, then there are DMAR devices */
-	status = acpi_get_table(ACPI_SIG_DMAR, 0,
-				(struct acpi_table_header **)&dmar_tbl);
-
-	if (ACPI_SUCCESS(status) && !dmar_tbl) {
-		printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
-		status = AE_NOT_FOUND;
-	}
-
-	return (ACPI_SUCCESS(status) ? 1 : 0);
-}
-
 void __init detect_intel_iommu(void)
 {
 	int ret;
 
-	ret = early_dmar_detect();
+	ret = dmar_table_detect();
 
 #ifdef CONFIG_DMAR
 	{
@@ -479,14 +484,16 @@ void __init detect_intel_iommu(void)
 			       " x2apic support\n");
 
 			dmar_disabled = 1;
-			return;
+			goto end;
 		}
 
 		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
 		    !dmar_disabled)
 			iommu_detected = 1;
 	}
+end:
 #endif
+	dmar_tbl = NULL;
 }
 
 
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 279c940a0039..bf7d6ce9bbb3 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
 	cfg->msg.address_hi = 0xffffffff;
 
 	irq = create_irq();
-	if (irq < 0) {
+
+	if (irq <= 0) {
 		kfree(cfg);
 		return -EBUSY;
 	}
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 738d4c89581c..2de5a3238c94 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -1,3 +1,4 @@
+#include <linux/interrupt.h>
 #include <linux/dmar.h>
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
@@ -11,41 +12,64 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static int ir_ioapic_num;
 int intr_remapping_enabled;
 
-static struct {
+struct irq_2_iommu {
 	struct intel_iommu *iommu;
 	u16 irte_index;
 	u16 sub_handle;
 	u8 irte_mask;
-} irq_2_iommu[NR_IRQS];
+};
+
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+	return irq_2_iommu(irq);
+}
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
-int irq_remapped(int irq)
+static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
 {
-	if (irq > NR_IRQS)
-		return 0;
+	struct irq_2_iommu *irq_iommu;
+
+	irq_iommu = irq_2_iommu(irq);
+
+	if (!irq_iommu)
+		return NULL;
+
+	if (!irq_iommu->iommu)
+		return NULL;
 
-	if (!irq_2_iommu[irq].iommu)
-		return 0;
+	return irq_iommu;
+}
 
-	return 1;
+int irq_remapped(int irq)
+{
+	return valid_irq_2_iommu(irq) != NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
-	if (!entry || irq > NR_IRQS)
+	if (!entry)
 		return -1;
 
 	spin_lock(&irq_2_ir_lock);
-	if (!irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
-	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
+	*entry = *(irq_iommu->iommu->ir_table->base + index);
 
 	spin_unlock(&irq_2_ir_lock);
 	return 0;
@@ -54,6 +78,7 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
+	struct irq_2_iommu *irq_iommu;
 	u16 index, start_index;
 	unsigned int mask = 0;
 	int i;
@@ -61,6 +86,10 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	if (!count)
 		return -1;
 
+	/* protect irq_2_iommu_alloc later */
+	if (irq >= nr_irqs)
+		return -1;
+
 	/*
 	 * start the IRTE search from index 0.
 	 */
@@ -100,10 +129,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = mask;
+	irq_iommu = irq_2_iommu_alloc(irq);
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = mask;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -124,31 +154,33 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	int index;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	*sub_handle = irq_2_iommu[irq].sub_handle;
-	index = irq_2_iommu[irq].irte_index;
+	*sub_handle = irq_iommu->sub_handle;
+	index = irq_iommu->irte_index;
 	spin_unlock(&irq_2_ir_lock);
 	return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
+	struct irq_2_iommu *irq_iommu;
+
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
-		spin_unlock(&irq_2_ir_lock);
-		return -1;
-	}
 
-	irq_2_iommu[irq].iommu = iommu;
-	irq_2_iommu[irq].irte_index = index;
-	irq_2_iommu[irq].sub_handle = subhandle;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu = irq_2_iommu_alloc(irq);
+
+	irq_iommu->iommu = iommu;
+	irq_iommu->irte_index = index;
+	irq_iommu->sub_handle = subhandle;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -157,16 +189,19 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
+	struct irq_2_iommu *irq_iommu;
+
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_2_iommu(irq)->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
@@ -178,16 +213,18 @@ int modify_irte(int irq, struct irte *irte_modified)
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
@@ -203,18 +240,20 @@ int flush_irte(int irq)
 {
 	int index;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
 	return 0;
@@ -246,28 +285,30 @@ int free_irte(int irq)
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
+	struct irq_2_iommu *irq_iommu;
 
 	spin_lock(&irq_2_ir_lock);
-	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+	irq_iommu = valid_irq_2_iommu(irq);
+	if (!irq_iommu) {
 		spin_unlock(&irq_2_ir_lock);
 		return -1;
 	}
 
-	iommu = irq_2_iommu[irq].iommu;
+	iommu = irq_iommu->iommu;
 
-	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	irte = &iommu->ir_table->base[index];
 
-	if (!irq_2_iommu[irq].sub_handle) {
-		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+	if (!irq_iommu->sub_handle) {
+		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
-	irq_2_iommu[irq].iommu = NULL;
-	irq_2_iommu[irq].irte_index = 0;
-	irq_2_iommu[irq].sub_handle = 0;
-	irq_2_iommu[irq].irte_mask = 0;
+	irq_iommu->iommu = NULL;
+	irq_iommu->irte_index = 0;
+	irq_iommu->sub_handle = 0;
+	irq_iommu->irte_mask = 0;
 
 	spin_unlock(&irq_2_ir_lock);
 
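[ A note on the intr_remapping.c conversion above: it applies the same
  pattern as the genirq work. The flat irq_2_iommu[NR_IRQS] array becomes
  a static irq_2_iommuX[] reachable only through irq_2_iommu() /
  irq_2_iommu_alloc(), and every helper takes one shape: look up under
  irq_2_ir_lock, validate, then work through the returned pointer instead
  of indexing a global array. A condensed sketch of that shape, for
  illustration only (some_irte_op is a placeholder name; the other
  identifiers are the ones from the diff):

	int some_irte_op(int irq)
	{
		struct irq_2_iommu *irq_iommu;

		spin_lock(&irq_2_ir_lock);
		irq_iommu = valid_irq_2_iommu(irq);	/* NULL if out of range or unmapped */
		if (!irq_iommu) {
			spin_unlock(&irq_2_ir_lock);
			return -1;
		}
		/* ... operate on irq_iommu->irte_index etc. ... */
		spin_unlock(&irq_2_ir_lock);
		return 0;
	}

  Only the two accessor bodies need to change when the 1:1 array is
  replaced by sparse, on-demand allocation in v2.6.29. ]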