author     Eric W. Biederman <ebiederm@xmission.com>	2006-10-04 05:16:27 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>	2006-10-04 10:55:26 -0400
commit     a24ceab4f44f21749aa0b6bd38bee37c775e036f (patch)
tree       d33625ff3b5f7a7b9b90a6e0e765ad19bfd00195
parent     f5b9ed7acdcfea4bf73a70dececa7483787503ed (diff)
[PATCH] genirq: irq: convert the move_irq flag from a 32bit word to a single bit
The primary aim of this patchset is to remove maintenance problems caused by the irq infrastructure.  The two big issues I address are an artificially small cap on the number of irqs, and that MSI assumes vector == irq.  My primary focus is on x86_64, but I have touched other architectures where necessary to keep them from breaking.

- To increase the number of irqs I modify the code to look at the (cpu, vector) pair instead of just looking at the vector.  With a large number of irqs available, systems with a large irq count no longer need to compress their irq numbers to fit, removing a lot of brittle special cases.  For the acpi guys the result is that irq == gsi.

- Addressing the fact that MSI assumes irq == vector takes a few more patches.  But suffice it to say that when I am done none of the generic irq code even knows what a vector is.

In quick testing on a large Unisys x86_64 machine we stumbled over at least one driver that assumed that NR_IRQS could always fit into an 8 bit number.  This driver is clearly buggy today, but this has become a class of bugs that is now much easier to hit.

This patch:

This is a minor space optimization.  In practice I don't think this has any effect because of our alignment constraints and the other fields, but there is no point in chewing up an unnecessary word, and since we already read the flag field this should improve the cache hit ratio of the irq handler.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rajesh Shah <rajesh.shah@intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: "Protasevich, Natalie" <Natalie.Protasevich@UNISYS.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
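As a rough illustration of the space argument above (a stand-alone user-space sketch, not the kernel code; the names desc_word, desc_bit and MOVE_PENDING are invented for the example), folding the boolean into the status word the handler already reads avoids a separate word per descriptor and keeps the test on data that is already loaded:

  #include <stdio.h>

  /* Invented flag value for the sketch; the kernel's equivalent is
   * IRQ_MOVE_PENDING in include/linux/irq.h. */
  #define MOVE_PENDING	0x40000000u

  /* Before: the pending-move marker occupies a word of its own. */
  struct desc_word {
  	unsigned int status;
  	unsigned int move_irq;		/* 1 = re-target on next interrupt */
  };

  /* After: the marker is a single bit in the status word that the
   * interrupt handler reads anyway. */
  struct desc_bit {
  	unsigned int status;		/* MOVE_PENDING lives here */
  };

  int main(void)
  {
  	struct desc_bit d = { 0 };

  	d.status |= MOVE_PENDING;		/* mark a pending move */
  	if (d.status & MOVE_PENDING)		/* test it in the handler path */
  		d.status &= ~MOVE_PENDING;	/* clear once acted on */

  	printf("separate word: %zu bytes, folded bit: %zu bytes\n",
  	       sizeof(struct desc_word), sizeof(struct desc_bit));
  	return 0;
  }

On a typical build this prints 8 bytes versus 4 bytes for the two structs; in the real irq_desc the saving may be absorbed by alignment and the surrounding fields, as the log notes, but the flag test no longer touches an extra word.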
-rw-r--r--  include/linux/irq.h     |  3 +--
-rw-r--r--  kernel/irq/migration.c  |  6 +++---
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 48d3cb3b6a47..3eab46f590a9 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -59,6 +59,7 @@
 #define IRQ_NOAUTOEN		0x08000000	/* IRQ will not be enabled on request irq */
 #define IRQ_DELAYED_DISABLE	0x10000000	/* IRQ disable (masking) happens delayed. */
 #define IRQ_WAKEUP		0x20000000	/* IRQ triggers system wakeup */
+#define IRQ_MOVE_PENDING	0x40000000	/* need to re-target IRQ destination */
 
 struct proc_dir_entry;
 
@@ -132,7 +133,6 @@ struct irq_chip {
  * @affinity:		IRQ affinity on SMP
  * @cpu:		cpu index useful for balancing
  * @pending_mask:	pending rebalanced interrupts
- * @move_irq:		need to re-target IRQ destination
  * @dir:		/proc/irq/ procfs entry
  * @affinity_entry:	/proc/irq/smp_affinity procfs entry on SMP
  *
@@ -159,7 +159,6 @@ struct irq_desc {
 #endif
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
 	cpumask_t pending_mask;
-	unsigned int move_irq;		/* need to re-target IRQ dest */
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *dir;
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a57ebe9fa6f6..9b234df810d0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -7,7 +7,7 @@ void set_pending_irq(unsigned int irq, cpumask_t mask)
 	unsigned long flags;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	desc->move_irq = 1;
+	desc->status |= IRQ_MOVE_PENDING;
 	irq_desc[irq].pending_mask = mask;
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -17,7 +17,7 @@ void move_native_irq(int irq)
 	struct irq_desc *desc = irq_desc + irq;
 	cpumask_t tmp;
 
-	if (likely(!desc->move_irq))
+	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
 
 	/*
@@ -28,7 +28,7 @@ void move_native_irq(int irq)
 		return;
 	}
 
-	desc->move_irq = 0;
+	desc->status &= ~IRQ_MOVE_PENDING;
 
 	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
 		return;