author     Thomas Gleixner <tglx@linutronix.de>    2010-09-27 08:44:25 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2010-10-04 06:27:16 -0400
commit     ff7dcd44dd446db2c3e13bdedf2d52b8e0127f16 (patch)
tree       ca03e829ea08aa536124a7777d99233dbbd89984 /kernel
parent     3bb9808e99bcc36eecb8e082bf70efb2a0bcdcb7 (diff)
genirq: Create irq_data
Low-level chip functions need access to irq_desc->handler_data,
irq_desc->chip_data and irq_desc->msi_desc. We hand down the irq
number to the low-level functions, so they need to look up irq_desc.
With sparse irq this means a radix tree lookup.
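For illustration only, this is roughly what that lookup costs today: an old-style chip callback gets just the irq number and has to find the descriptor itself before it can reach its own chip state. The callback demo_mask_irq and the demo_chip_data type below are invented for this sketch; only irq_to_desc() and the irq_desc fields are taken from the existing code.

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical per-chip state, used only in this sketch. */
struct demo_chip_data {
        void __iomem    *mask_reg;
        unsigned int    bit;
};

/*
 * Old-style callback: it is handed only the irq number, so it has to
 * look the descriptor up itself.  With CONFIG_SPARSE_IRQ that lookup
 * (irq_to_desc) walks a radix tree on every mask/unmask/ack.
 */
static void demo_mask_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct demo_chip_data *cd = desc->chip_data;

        writel(BIT(cd->bit), cd->mask_reg);
}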
We could hand down irq_desc itself, but low-level chip functions have
no need to fiddle with it directly and we want to restrict access to
irq_desc further.
Preparatory patch for new chip functions.
Note that the ugly anonymous union/struct is there to avoid a full
tree-wide cleanup for now. It is not going to last 3 years like __do_IRQ() did.
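As a reading aid, the shape this introduces looks roughly like the following. It is an abbreviated sketch, not the verbatim declaration; the exact field list is in include/linux/irq.h as changed by this commit.

/* Abbreviated sketch of the new layout, not the verbatim header. */
struct irq_data {
        unsigned int    irq;
        struct irq_chip *chip;
        void            *handler_data;
        void            *chip_data;
        struct msi_desc *msi_desc;
        /* ... */
};

struct irq_desc {
        union {
                struct irq_data irq_data;
                struct {        /* legacy names, kept so desc->irq etc. still compile */
                        unsigned int    irq;
                        struct irq_chip *chip;
                        void            *handler_data;
                        void            *chip_data;
                        struct msi_desc *msi_desc;
                        /* ... */
                };
        };
        /* remaining irq_desc members unchanged */
};

Because the anonymous struct mirrors irq_data field for field, desc->chip and desc->irq_data.chip name the same storage, which is why call sites can be converted one at a time, as the hunks below do for kernel/irq/handle.c.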
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20100927121841.645542300@linutronix.de>
Reviewed-by: H. Peter Anvin <hpa@zytor.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/handle.c | 39
1 file changed, 19 insertions(+), 20 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 27e5c6911223..099d4fc368c3 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -75,12 +75,10 @@ EXPORT_SYMBOL_GPL(nr_irqs);
 #ifdef CONFIG_SPARSE_IRQ
 
 static struct irq_desc irq_desc_init = {
-	.irq = -1,
 	.status = IRQ_DISABLED,
-	.chip = &no_irq_chip,
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
 	.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -105,7 +103,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
 	raw_spin_lock_init(&desc->lock);
-	desc->irq = irq;
+	desc->irq_data.irq = irq;
 #ifdef CONFIG_SMP
 	desc->node = node;
 #endif
@@ -151,12 +149,10 @@ void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
-		.irq = -1,
 		.status = IRQ_DISABLED,
-		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 	}
 };
 
@@ -183,8 +179,11 @@ int __init early_irq_init(void)
 	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
 					  sizeof(int), GFP_NOWAIT, node);
 
+	irq_desc_init.irq_data.chip = &no_irq_chip;
+
 	for (i = 0; i < legacy_count; i++) {
-		desc[i].irq = i;
+		desc[i].irq_data.irq = i;
+		desc[i].irq_data.chip = &no_irq_chip;
 #ifdef CONFIG_SMP
 		desc[i].node = node;
 #endif
@@ -241,11 +240,10 @@ out_unlock:
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
-		.chip = &no_irq_chip,
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
 	}
 };
 
@@ -264,7 +262,8 @@ int __init early_irq_init(void)
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
-		desc[i].irq = i;
+		desc[i].irq_data.irq = i;
+		desc[i].irq_data.chip = &no_irq_chip;
 		alloc_desc_masks(&desc[i], 0, true);
 		init_desc_masks(&desc[i]);
 		desc[i].kstat_irqs = kstat_irqs_all[i];
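For context on where this is headed: once the core can hand over irq_data directly, a chip callback no longer needs any lookup. The sketch below only illustrates the direction the message calls "new chip functions"; the real callbacks and their signatures are introduced by follow-up patches, and demo_mask_irq_data is an invented name reusing the hypothetical demo_chip_data from the sketch further up.

/*
 * Illustration only: the kind of callback this patch prepares for.
 * The core passes the irq_data in, so the chip code reaches its
 * chip_data without an irq_to_desc() radix tree lookup and without
 * touching irq_desc at all.
 */
static void demo_mask_irq_data(struct irq_data *data)
{
        struct demo_chip_data *cd = data->chip_data;

        writel(BIT(cd->bit), cd->mask_reg);
}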