diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2015-08-02 16:38:27 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2015-08-05 18:14:59 -0400 |
commit | a782a7e46bb50822fabfeb7271605762a59c86df (patch) | |
tree | a1b4d1798fc5b87f255716f07a170795cf5d5600 | |
parent | f61ae4fb66a4f7ae49e3456003fc4328d6db09c9 (diff) |
x86/irq: Store irq descriptor in vector array
We can spare the irq_desc lookup in the interrupt entry code if we
store the descriptor pointer in the vector array instead of the
interrupt number.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.717724106@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | arch/x86/include/asm/hw_irq.h | 6 | ||||
-rw-r--r-- | arch/x86/include/asm/irq.h | 4 | ||||
-rw-r--r-- | arch/x86/kernel/apic/vector.c | 51 | ||||
-rw-r--r-- | arch/x86/kernel/irq.c | 37 | ||||
-rw-r--r-- | arch/x86/kernel/irq_32.c | 9 | ||||
-rw-r--r-- | arch/x86/kernel/irq_64.c | 9 | ||||
-rw-r--r-- | arch/x86/kernel/irqinit.c | 4 | ||||
-rw-r--r-- | arch/x86/lguest/boot.c | 4 |
8 files changed, 58 insertions, 66 deletions
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 62bb8d23b826..1e3408e88604 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -182,10 +182,10 @@ extern char irq_entries_start[]; | |||
182 | #define trace_irq_entries_start irq_entries_start | 182 | #define trace_irq_entries_start irq_entries_start |
183 | #endif | 183 | #endif |
184 | 184 | ||
185 | #define VECTOR_UNUSED (-1) | 185 | #define VECTOR_UNUSED NULL |
186 | #define VECTOR_RETRIGGERED (-2) | 186 | #define VECTOR_RETRIGGERED ((void *)~0UL) |
187 | 187 | ||
188 | typedef int vector_irq_t[NR_VECTORS]; | 188 | typedef struct irq_desc* vector_irq_t[NR_VECTORS]; |
189 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | 189 | DECLARE_PER_CPU(vector_irq_t, vector_irq); |
190 | 190 | ||
191 | #endif /* !__ASSEMBLY__ */ | 191 | #endif /* !__ASSEMBLY__ */ |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 8008d06581c7..881b4768644a 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -36,7 +36,9 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); | |||
36 | 36 | ||
37 | extern void (*x86_platform_ipi_callback)(void); | 37 | extern void (*x86_platform_ipi_callback)(void); |
38 | extern void native_init_IRQ(void); | 38 | extern void native_init_IRQ(void); |
39 | extern bool handle_irq(unsigned irq, struct pt_regs *regs); | 39 | |
40 | struct irq_desc; | ||
41 | extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs); | ||
40 | 42 | ||
41 | extern __visible unsigned int do_IRQ(struct pt_regs *regs); | 43 | extern __visible unsigned int do_IRQ(struct pt_regs *regs); |
42 | 44 | ||
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 9a6d11258684..200b5a5d6b79 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -169,7 +169,7 @@ next: | |||
169 | goto next; | 169 | goto next; |
170 | 170 | ||
171 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { | 171 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { |
172 | if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNUSED) | 172 | if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) |
173 | goto next; | 173 | goto next; |
174 | } | 174 | } |
175 | /* Found one! */ | 175 | /* Found one! */ |
@@ -181,7 +181,7 @@ next: | |||
181 | cpumask_intersects(d->old_domain, cpu_online_mask); | 181 | cpumask_intersects(d->old_domain, cpu_online_mask); |
182 | } | 182 | } |
183 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) | 183 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) |
184 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 184 | per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); |
185 | d->cfg.vector = vector; | 185 | d->cfg.vector = vector; |
186 | cpumask_copy(d->domain, vector_cpumask); | 186 | cpumask_copy(d->domain, vector_cpumask); |
187 | err = 0; | 187 | err = 0; |
@@ -223,8 +223,9 @@ static int assign_irq_vector_policy(int irq, int node, | |||
223 | 223 | ||
224 | static void clear_irq_vector(int irq, struct apic_chip_data *data) | 224 | static void clear_irq_vector(int irq, struct apic_chip_data *data) |
225 | { | 225 | { |
226 | int cpu, vector; | 226 | struct irq_desc *desc; |
227 | unsigned long flags; | 227 | unsigned long flags; |
228 | int cpu, vector; | ||
228 | 229 | ||
229 | raw_spin_lock_irqsave(&vector_lock, flags); | 230 | raw_spin_lock_irqsave(&vector_lock, flags); |
230 | BUG_ON(!data->cfg.vector); | 231 | BUG_ON(!data->cfg.vector); |
@@ -241,10 +242,11 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data) | |||
241 | return; | 242 | return; |
242 | } | 243 | } |
243 | 244 | ||
245 | desc = irq_to_desc(irq); | ||
244 | for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) { | 246 | for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) { |
245 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | 247 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; |
246 | vector++) { | 248 | vector++) { |
247 | if (per_cpu(vector_irq, cpu)[vector] != irq) | 249 | if (per_cpu(vector_irq, cpu)[vector] != desc) |
248 | continue; | 250 | continue; |
249 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; | 251 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; |
250 | break; | 252 | break; |
@@ -402,30 +404,30 @@ int __init arch_early_irq_init(void) | |||
402 | return arch_early_ioapic_init(); | 404 | return arch_early_ioapic_init(); |
403 | } | 405 | } |
404 | 406 | ||
407 | /* Initialize vector_irq on a new cpu */ | ||
405 | static void __setup_vector_irq(int cpu) | 408 | static void __setup_vector_irq(int cpu) |
406 | { | 409 | { |
407 | /* Initialize vector_irq on a new cpu */ | ||
408 | int irq, vector; | ||
409 | struct apic_chip_data *data; | 410 | struct apic_chip_data *data; |
411 | struct irq_desc *desc; | ||
412 | int irq, vector; | ||
410 | 413 | ||
411 | /* Mark the inuse vectors */ | 414 | /* Mark the inuse vectors */ |
412 | for_each_active_irq(irq) { | 415 | for_each_irq_desc(irq, desc) { |
413 | data = apic_chip_data(irq_get_irq_data(irq)); | 416 | struct irq_data *idata = irq_desc_get_irq_data(desc); |
414 | if (!data) | ||
415 | continue; | ||
416 | 417 | ||
417 | if (!cpumask_test_cpu(cpu, data->domain)) | 418 | data = apic_chip_data(idata); |
419 | if (!data || !cpumask_test_cpu(cpu, data->domain)) | ||
418 | continue; | 420 | continue; |
419 | vector = data->cfg.vector; | 421 | vector = data->cfg.vector; |
420 | per_cpu(vector_irq, cpu)[vector] = irq; | 422 | per_cpu(vector_irq, cpu)[vector] = desc; |
421 | } | 423 | } |
422 | /* Mark the free vectors */ | 424 | /* Mark the free vectors */ |
423 | for (vector = 0; vector < NR_VECTORS; ++vector) { | 425 | for (vector = 0; vector < NR_VECTORS; ++vector) { |
424 | irq = per_cpu(vector_irq, cpu)[vector]; | 426 | desc = per_cpu(vector_irq, cpu)[vector]; |
425 | if (irq <= VECTOR_UNUSED) | 427 | if (IS_ERR_OR_NULL(desc)) |
426 | continue; | 428 | continue; |
427 | 429 | ||
428 | data = apic_chip_data(irq_get_irq_data(irq)); | 430 | data = apic_chip_data(irq_desc_get_irq_data(desc)); |
429 | if (!cpumask_test_cpu(cpu, data->domain)) | 431 | if (!cpumask_test_cpu(cpu, data->domain)) |
430 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; | 432 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; |
431 | } | 433 | } |
@@ -447,7 +449,7 @@ void setup_vector_irq(int cpu) | |||
447 | * legacy vector to irq mapping: | 449 | * legacy vector to irq mapping: |
448 | */ | 450 | */ |
449 | for (irq = 0; irq < nr_legacy_irqs(); irq++) | 451 | for (irq = 0; irq < nr_legacy_irqs(); irq++) |
450 | per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq; | 452 | per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq); |
451 | 453 | ||
452 | __setup_vector_irq(cpu); | 454 | __setup_vector_irq(cpu); |
453 | } | 455 | } |
@@ -543,19 +545,13 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) | |||
543 | 545 | ||
544 | me = smp_processor_id(); | 546 | me = smp_processor_id(); |
545 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 547 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
546 | int irq; | ||
547 | unsigned int irr; | ||
548 | struct irq_desc *desc; | ||
549 | struct apic_chip_data *data; | 548 | struct apic_chip_data *data; |
549 | struct irq_desc *desc; | ||
550 | unsigned int irr; | ||
550 | 551 | ||
551 | retry: | 552 | retry: |
552 | irq = __this_cpu_read(vector_irq[vector]); | 553 | desc = __this_cpu_read(vector_irq[vector]); |
553 | 554 | if (IS_ERR_OR_NULL(desc)) | |
554 | if (irq <= VECTOR_UNUSED) | ||
555 | continue; | ||
556 | |||
557 | desc = irq_to_desc(irq); | ||
558 | if (!desc) | ||
559 | continue; | 555 | continue; |
560 | 556 | ||
561 | if (!raw_spin_trylock(&desc->lock)) { | 557 | if (!raw_spin_trylock(&desc->lock)) { |
@@ -565,9 +561,10 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) | |||
565 | goto retry; | 561 | goto retry; |
566 | } | 562 | } |
567 | 563 | ||
568 | data = apic_chip_data(&desc->irq_data); | 564 | data = apic_chip_data(irq_desc_get_irq_data(desc)); |
569 | if (!data) | 565 | if (!data) |
570 | goto unlock; | 566 | goto unlock; |
567 | |||
571 | /* | 568 | /* |
572 | * Check if the irq migration is in progress. If so, we | 569 | * Check if the irq migration is in progress. If so, we |
573 | * haven't received the cleanup request yet for this irq. | 570 | * haven't received the cleanup request yet for this irq. |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 140950fb9902..e010847583d7 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -211,22 +211,21 @@ u64 arch_irq_stat(void) | |||
211 | __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | 211 | __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) |
212 | { | 212 | { |
213 | struct pt_regs *old_regs = set_irq_regs(regs); | 213 | struct pt_regs *old_regs = set_irq_regs(regs); |
214 | 214 | struct irq_desc * desc; | |
215 | /* high bit used in ret_from_ code */ | 215 | /* high bit used in ret_from_ code */ |
216 | unsigned vector = ~regs->orig_ax; | 216 | unsigned vector = ~regs->orig_ax; |
217 | unsigned irq; | ||
218 | 217 | ||
219 | entering_irq(); | 218 | entering_irq(); |
220 | 219 | ||
221 | irq = __this_cpu_read(vector_irq[vector]); | 220 | desc = __this_cpu_read(vector_irq[vector]); |
222 | 221 | ||
223 | if (!handle_irq(irq, regs)) { | 222 | if (!handle_irq(desc, regs)) { |
224 | ack_APIC_irq(); | 223 | ack_APIC_irq(); |
225 | 224 | ||
226 | if (irq != VECTOR_RETRIGGERED) { | 225 | if (desc != VECTOR_RETRIGGERED) { |
227 | pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n", | 226 | pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n", |
228 | __func__, smp_processor_id(), | 227 | __func__, smp_processor_id(), |
229 | vector, irq); | 228 | vector); |
230 | } else { | 229 | } else { |
231 | __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); | 230 | __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); |
232 | } | 231 | } |
@@ -330,10 +329,10 @@ static struct cpumask affinity_new, online_new; | |||
330 | */ | 329 | */ |
331 | int check_irq_vectors_for_cpu_disable(void) | 330 | int check_irq_vectors_for_cpu_disable(void) |
332 | { | 331 | { |
333 | int irq, cpu; | ||
334 | unsigned int this_cpu, vector, this_count, count; | 332 | unsigned int this_cpu, vector, this_count, count; |
335 | struct irq_desc *desc; | 333 | struct irq_desc *desc; |
336 | struct irq_data *data; | 334 | struct irq_data *data; |
335 | int cpu; | ||
337 | 336 | ||
338 | this_cpu = smp_processor_id(); | 337 | this_cpu = smp_processor_id(); |
339 | cpumask_copy(&online_new, cpu_online_mask); | 338 | cpumask_copy(&online_new, cpu_online_mask); |
@@ -341,24 +340,21 @@ int check_irq_vectors_for_cpu_disable(void) | |||
341 | 340 | ||
342 | this_count = 0; | 341 | this_count = 0; |
343 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 342 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
344 | irq = __this_cpu_read(vector_irq[vector]); | 343 | desc = __this_cpu_read(vector_irq[vector]); |
345 | if (irq < 0) | 344 | if (IS_ERR_OR_NULL(desc)) |
346 | continue; | ||
347 | desc = irq_to_desc(irq); | ||
348 | if (!desc) | ||
349 | continue; | 345 | continue; |
350 | |||
351 | /* | 346 | /* |
352 | * Protect against concurrent action removal, affinity | 347 | * Protect against concurrent action removal, affinity |
353 | * changes etc. | 348 | * changes etc. |
354 | */ | 349 | */ |
355 | raw_spin_lock(&desc->lock); | 350 | raw_spin_lock(&desc->lock); |
356 | data = irq_desc_get_irq_data(desc); | 351 | data = irq_desc_get_irq_data(desc); |
357 | cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data)); | 352 | cpumask_copy(&affinity_new, |
353 | irq_data_get_affinity_mask(data)); | ||
358 | cpumask_clear_cpu(this_cpu, &affinity_new); | 354 | cpumask_clear_cpu(this_cpu, &affinity_new); |
359 | 355 | ||
360 | /* Do not count inactive or per-cpu irqs. */ | 356 | /* Do not count inactive or per-cpu irqs. */ |
361 | if (!irq_has_action(irq) || irqd_is_per_cpu(data)) { | 357 | if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) { |
362 | raw_spin_unlock(&desc->lock); | 358 | raw_spin_unlock(&desc->lock); |
363 | continue; | 359 | continue; |
364 | } | 360 | } |
@@ -399,8 +395,8 @@ int check_irq_vectors_for_cpu_disable(void) | |||
399 | for (vector = FIRST_EXTERNAL_VECTOR; | 395 | for (vector = FIRST_EXTERNAL_VECTOR; |
400 | vector < first_system_vector; vector++) { | 396 | vector < first_system_vector; vector++) { |
401 | if (!test_bit(vector, used_vectors) && | 397 | if (!test_bit(vector, used_vectors) && |
402 | per_cpu(vector_irq, cpu)[vector] <= VECTOR_UNUSED) | 398 | IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) |
403 | count++; | 399 | count++; |
404 | } | 400 | } |
405 | } | 401 | } |
406 | 402 | ||
@@ -504,14 +500,13 @@ void fixup_irqs(void) | |||
504 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | 500 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
505 | unsigned int irr; | 501 | unsigned int irr; |
506 | 502 | ||
507 | if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNUSED) | 503 | if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector]))) |
508 | continue; | 504 | continue; |
509 | 505 | ||
510 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | 506 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); |
511 | if (irr & (1 << (vector % 32))) { | 507 | if (irr & (1 << (vector % 32))) { |
512 | irq = __this_cpu_read(vector_irq[vector]); | 508 | desc = __this_cpu_read(vector_irq[vector]); |
513 | 509 | ||
514 | desc = irq_to_desc(irq); | ||
515 | raw_spin_lock(&desc->lock); | 510 | raw_spin_lock(&desc->lock); |
516 | data = irq_desc_get_irq_data(desc); | 511 | data = irq_desc_get_irq_data(desc); |
517 | chip = irq_data_get_irq_chip(data); | 512 | chip = irq_data_get_irq_chip(data); |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index cd74f5978ab9..217b01388038 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -148,21 +148,20 @@ void do_softirq_own_stack(void) | |||
148 | call_on_stack(__do_softirq, isp); | 148 | call_on_stack(__do_softirq, isp); |
149 | } | 149 | } |
150 | 150 | ||
151 | bool handle_irq(unsigned irq, struct pt_regs *regs) | 151 | bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) |
152 | { | 152 | { |
153 | struct irq_desc *desc; | 153 | unsigned int irq = irq_desc_get_irq(desc); |
154 | int overflow; | 154 | int overflow; |
155 | 155 | ||
156 | overflow = check_stack_overflow(); | 156 | overflow = check_stack_overflow(); |
157 | 157 | ||
158 | desc = irq_to_desc(irq); | 158 | if (IS_ERR_OR_NULL(desc)) |
159 | if (unlikely(!desc)) | ||
160 | return false; | 159 | return false; |
161 | 160 | ||
162 | if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { | 161 | if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { |
163 | if (unlikely(overflow)) | 162 | if (unlikely(overflow)) |
164 | print_stack_overflow(); | 163 | print_stack_overflow(); |
165 | desc->handle_irq(irq, desc); | 164 | generic_handle_irq_desc(irq, desc); |
166 | } | 165 | } |
167 | 166 | ||
168 | return true; | 167 | return true; |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index bc4604e500a3..ff16ccb918f2 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -68,16 +68,13 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
68 | #endif | 68 | #endif |
69 | } | 69 | } |
70 | 70 | ||
71 | bool handle_irq(unsigned irq, struct pt_regs *regs) | 71 | bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) |
72 | { | 72 | { |
73 | struct irq_desc *desc; | ||
74 | |||
75 | stack_overflow_check(regs); | 73 | stack_overflow_check(regs); |
76 | 74 | ||
77 | desc = irq_to_desc(irq); | 75 | if (unlikely(IS_ERR_OR_NULL(desc))) |
78 | if (unlikely(!desc)) | ||
79 | return false; | 76 | return false; |
80 | 77 | ||
81 | generic_handle_irq_desc(irq, desc); | 78 | generic_handle_irq_desc(irq_desc_get_irq(desc), desc); |
82 | return true; | 79 | return true; |
83 | } | 80 | } |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 32a637ffdf98..1423ab1b0312 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector) | |||
60 | int cpu; | 60 | int cpu; |
61 | 61 | ||
62 | for_each_online_cpu(cpu) { | 62 | for_each_online_cpu(cpu) { |
63 | if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNUSED) | 63 | if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) |
64 | return 1; | 64 | return 1; |
65 | } | 65 | } |
66 | 66 | ||
@@ -94,7 +94,7 @@ void __init init_IRQ(void) | |||
94 | * irq's migrate etc. | 94 | * irq's migrate etc. |
95 | */ | 95 | */ |
96 | for (i = 0; i < nr_legacy_irqs(); i++) | 96 | for (i = 0; i < nr_legacy_irqs(); i++) |
97 | per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i; | 97 | per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i); |
98 | 98 | ||
99 | x86_init.irqs.intr_init(); | 99 | x86_init.irqs.intr_init(); |
100 | } | 100 | } |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 2165f45befff..47071a08bfa6 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -843,6 +843,7 @@ static struct irq_chip lguest_irq_controller = { | |||
843 | */ | 843 | */ |
844 | static int lguest_setup_irq(unsigned int irq) | 844 | static int lguest_setup_irq(unsigned int irq) |
845 | { | 845 | { |
846 | struct irq_desc *desc; | ||
846 | int err; | 847 | int err; |
847 | 848 | ||
848 | /* Returns -ve error or vector number. */ | 849 | /* Returns -ve error or vector number. */ |
@@ -858,7 +859,8 @@ static int lguest_setup_irq(unsigned int irq) | |||
858 | handle_level_irq, "level"); | 859 | handle_level_irq, "level"); |
859 | 860 | ||
860 | /* Some systems map "vectors" to interrupts weirdly. Not us! */ | 861 | /* Some systems map "vectors" to interrupts weirdly. Not us! */ |
861 | __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], irq); | 862 | desc = irq_to_desc(irq); |
863 | __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc); | ||
862 | return 0; | 864 | return 0; |
863 | } | 865 | } |
864 | 866 | ||