author		Yinghai Lu <yinghai@kernel.org>	2008-12-11 03:15:01 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-16 18:14:01 -0500
commit		48a1b10aff588833b73994704c47bbd0deb73e9c (patch)
tree		deb3c7b486346c3afa54014b3c3516344c2708f2 /kernel/irq/handle.c
parent		13bd41bc227a48d6cf8992a3286bf6eba3c71a0c (diff)
x86, sparseirq: move irq_desc according to smp_affinity, v7
Impact: improve NUMA handling by migrating irq_desc on smp_affinity changes
If CONFIG_NUMA_MIGRATE_IRQ_DESC is set:
- make irq_desc follow smp_affinity, i.e. move the irq_desc when the affinity changes
- call move_irq_desc() in irq_complete_move()
- legacy irq_descs are not moved, because they are allocated in a static array

For logical APIC mode, move_desc_in_progress_in_same_domain also needs to be added, otherwise the descriptor will not be moved; in that case it can take two phases to get the irq_desc moved.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
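Every hunk in this file follows the same pattern: once desc->chip->ack(irq) has run, the descriptor may have been migrated to another node, so the caller has to refresh its local pointer via irq_remap_to_desc(). A minimal sketch of that pattern, using only what the hunks below show; ack_and_refresh() is a hypothetical wrapper added here for illustration, not part of the patch:

#include <linux/irq.h>

/*
 * Hypothetical wrapper illustrating the ack-then-remap pattern this
 * patch applies in __do_IRQ(): after the chip's ack handler runs, the
 * irq_desc may have been moved, so fetch the current descriptor before
 * touching it again.
 */
static struct irq_desc *ack_and_refresh(unsigned int irq, struct irq_desc *desc)
{
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                /* the ack path may have migrated irq_desc to another node */
                desc = irq_remap_to_desc(irq, desc);
        }
        return desc;
}

Callers must continue with the returned pointer rather than the one they passed in; after a migration the old descriptor belongs to the wrong node.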
Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--	kernel/irq/handle.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 8aa09547f5ef..f1a23069c20a 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -23,7 +23,7 @@
 /*
  * lockdep: we want to handle all irq_desc locks as a single lock-class:
  */
-static struct lock_class_key irq_desc_lock_class;
+struct lock_class_key irq_desc_lock_class;
 
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
@@ -73,7 +73,7 @@ static struct irq_desc irq_desc_init = {
 #endif
 };
 
-static void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
         unsigned long bytes;
         char *ptr;
@@ -113,7 +113,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 /*
  * Protect the sparse_irqs:
  */
-static DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_SPINLOCK(sparse_irq_lock);
 
 struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
 
@@ -337,8 +337,11 @@ unsigned int __do_IRQ(unsigned int irq)
                 /*
                  * No locking required for CPU-local interrupts:
                  */
-                if (desc->chip->ack)
+                if (desc->chip->ack) {
                         desc->chip->ack(irq);
+                        /* get new one */
+                        desc = irq_remap_to_desc(irq, desc);
+                }
                 if (likely(!(desc->status & IRQ_DISABLED))) {
                         action_ret = handle_IRQ_event(irq, desc->action);
                         if (!noirqdebug)
@@ -349,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
         }
 
         spin_lock(&desc->lock);
-        if (desc->chip->ack)
+        if (desc->chip->ack) {
                 desc->chip->ack(irq);
+                desc = irq_remap_to_desc(irq, desc);
+        }
         /*
          * REPLAY is when Linux resends an IRQ that was dropped earlier
          * WAITING is used by probe to mark irqs that are being tested
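The hunks only show the call sites; irq_remap_to_desc() itself is defined outside kernel/irq/handle.c and is not part of this diff. Judging from how it is used here, it amounts to something like the sketch below; remap_to_desc_sketch() is an illustrative name and the real implementation in this series may differ:

#include <linux/irq.h>

#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
/* migration enabled: the descriptor may have moved, so look it up again */
static inline struct irq_desc *remap_to_desc_sketch(unsigned int irq,
                                                    struct irq_desc *desc)
{
        return irq_to_desc(irq);
}
#else
/* no migration configured: the pointer handed in is still the right one */
static inline struct irq_desc *remap_to_desc_sketch(unsigned int irq,
                                                    struct irq_desc *desc)
{
        return desc;
}
#endif

This is also consistent with the changelog note that legacy irq_descs are never moved: they live in a static array, so a fresh lookup simply returns the same descriptor.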