author     Thomas Gleixner <tglx@linutronix.de>    2008-10-15 09:27:23 -0400
committer  Ingo Molnar <mingo@elte.hu>             2008-10-16 10:53:15 -0400
commit     d6c88a507ef0b6afdb013cba4e7804ba7324d99a
tree       cdc4041acc212585e3920ad50bf2574cec04076d /kernel/irq/handle.c
parent     ee32c9732244bde4b9b59eeac2814c23e2b71f8d
genirq: revert dynarray
Revert the dynarray changes. They need more thought and polishing.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--  kernel/irq/handle.c  114
1 file changed, 9 insertions(+), 105 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f837133cdfbe..9fe86b3a60a5 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -18,11 +18,6 @@
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq: the interrupt number
@@ -30,15 +25,10 @@ static struct lock_class_key irq_desc_lock_class;
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -59,80 +49,6 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_desc irq_desc_init = {
-	.irq = -1U,
-	.status = IRQ_DISABLED,
-	.chip = &no_irq_chip,
-	.handle_irq = handle_bad_irq,
-	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
-};
-
-
-static void init_one_irq_desc(struct irq_desc *desc)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-}
-
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-			     unsigned long align,
-			     unsigned long goal);
-
-static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
-{
-	unsigned long bytes, total_bytes;
-	char *ptr;
-	int i;
-	unsigned long phys;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
-	total_bytes = bytes * nr_desc;
-	if (after_bootmem)
-		ptr = kzalloc(total_bytes, GFP_ATOMIC);
-	else
-		ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
-
-	if (!ptr)
-		panic(" can not allocate kstat_irqs\n");
-
-	phys = __pa(ptr);
-	printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
-	for (i = 0; i < nr_desc; i++) {
-		desc[i].kstat_irqs = (unsigned int *)ptr;
-		ptr += bytes;
-	}
-}
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	int i;
-	struct irq_desc *desc;
-
-	desc = *da->name;
-
-	for (i = 0; i < *da->nr; i++) {
-		init_one_irq_desc(&desc[i]);
-		desc[i].irq = i;
-	}
-
-	/* init kstat_irqs, nr_cpu_ids is ready already */
-	init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
-}
-
-struct irq_desc *irq_desc;
-DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
-
-#else
-
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -146,8 +62,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
-#endif
-
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -258,11 +172,8 @@ unsigned int __do_IRQ(unsigned int irq)
 	struct irqaction *action;
 	unsigned int status;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
+
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
 
@@ -351,23 +262,16 @@ out:
 
 
 #ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
 void early_init_irq_lock_class(void)
 {
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	int i;
 
 	for (i = 0; i < nr_irqs; i++)
 		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-#endif
 }
 #endif
-
-#ifdef CONFIG_HAVE_DYN_ARRAY
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->kstat_irqs[cpu];
-}
-#endif
-EXPORT_SYMBOL(kstat_irqs_cpu);
-
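
For context on what the revert restores (not part of the commit itself): the dynarray path sized irq_desc and the kstat_irqs counters at boot, whereas the reverted code keeps a fixed irq_desc[NR_IRQS] table and bumps per-CPU interrupt counts through a single kstat_incr_irqs_this_cpu(irq, desc) call. Below is a minimal user-space sketch of that fixed-table accounting idea; the *_sketch names, the trimmed struct, and the NR_IRQS/NR_CPUS values are illustrative assumptions, not the kernel's actual definitions.

/* Illustrative sketch only -- simplified stand-ins, not kernel code. */
#include <stdio.h>

#define NR_IRQS 16	/* assumed small fixed table, as in the non-dynarray path */
#define NR_CPUS 4	/* assumed CPU count for the sketch */

/* Trimmed-down stand-in for struct irq_desc: just a status word here. */
struct irq_desc_sketch {
	unsigned int status;
};

/* Statically sized descriptor table -- the shape the revert goes back to. */
static struct irq_desc_sketch irq_desc_sketch[NR_IRQS];

/* One counter per (cpu, irq) pair, the data kstat_irqs_cpu() reports. */
static unsigned int kstat_irqs_sketch[NR_CPUS][NR_IRQS];

/* Analogue of kstat_incr_irqs_this_cpu(): bump the handling CPU's counter. */
static void kstat_incr_irqs_this_cpu_sketch(unsigned int cpu, unsigned int irq)
{
	kstat_irqs_sketch[cpu][irq]++;
}

/* Analogue of kstat_irqs_cpu(): read back one CPU's count for an IRQ. */
static unsigned int kstat_irqs_cpu_sketch(unsigned int irq, unsigned int cpu)
{
	return kstat_irqs_sketch[cpu][irq];
}

int main(void)
{
	/* Pretend CPU 1 handled IRQ 9 three times. */
	irq_desc_sketch[9].status = 0;
	kstat_incr_irqs_this_cpu_sketch(1, 9);
	kstat_incr_irqs_this_cpu_sketch(1, 9);
	kstat_incr_irqs_this_cpu_sketch(1, 9);

	printf("irq 9 on cpu 1: %u\n", kstat_irqs_cpu_sketch(9, 1));
	return 0;
}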