Diffstat (limited to 'kernel/irq/handle.c')
 -rw-r--r--  kernel/irq/handle.c | 210
 1 file changed, 191 insertions(+), 19 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 5fa6198e9139..6492400cb50d 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
 
 #include "internals.h"
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq: the interrupt number
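
[ Note: irq_desc_lock_class is hoisted here from the CONFIG_TRACE_IRQFLAGS
  block that a later hunk removes, and it loses its "static". With sparse
  IRQs, descriptors are created at runtime, so init_one_irq_desc() must be
  able to reach the key and assign the shared lockdep class regardless of
  whether irqflags tracing is configured. ]
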
@@ -25,11 +32,10 @@
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
 
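
[ The open-coded kstat_this_cpu.irqs[irq]++ assumes a flat NR_IRQS-sized
  array, which sparse IRQs abolish, so the increment now goes through
  kstat_incr_irqs_this_cpu(). A minimal sketch of the idea (the real
  definition lives in <linux/kernel_stat.h> and may differ in detail):

	/* Sketch only: count this IRQ for the executing CPU through
	 * the per-descriptor statistics array. */
	#define kstat_incr_irqs_this_cpu(irq, desc) \
		((desc)->kstat_irqs[smp_processor_id()]++)
]
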
@@ -47,6 +53,158 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
  *
  * Controller mappings for all interrupt sources:
  */
+int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
+
+void __init __attribute__((weak)) arch_early_irq_init(void)
+{
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+	.irq        = -1,
+	.status     = IRQ_DISABLED,
+	.chip       = &no_irq_chip,
+	.handle_irq = handle_bad_irq,
+	.depth      = 1,
+	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+	.affinity   = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+	unsigned long bytes;
+	char *ptr;
+	int node;
+
+	/* Compute how many bytes we need per irq and allocate them */
+	bytes = nr * sizeof(unsigned int);
+
+	node = cpu_to_node(cpu);
+	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+	printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+	if (ptr)
+		desc->kstat_irqs = (unsigned int *)ptr;
+}
+
+void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+	desc->irq = irq;
+#ifdef CONFIG_SMP
+	desc->cpu = cpu;
+#endif
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	if (!desc->kstat_irqs) {
+		printk(KERN_ERR "can not alloc kstat_irqs\n");
+		BUG_ON(1);
+	}
+	arch_init_chip_data(desc, cpu);
+}
+
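
[ Both allocations in this path use GFP_ATOMIC deliberately:
  init_one_irq_desc() is called from irq_to_desc_alloc_cpu() below with
  sparse_irq_lock held and interrupts disabled, so a sleeping allocation
  is not an option. Each new descriptor is stamped from the irq_desc_init
  template, tagged with the shared lockdep class, and given its own
  per-CPU statistics array before the architecture hook attaches chip
  data. ]
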
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+	[0 ... NR_IRQS_LEGACY-1] = {
+		.irq        = -1,
+		.status     = IRQ_DISABLED,
+		.chip       = &no_irq_chip,
+		.handle_irq = handle_bad_irq,
+		.depth      = 1,
+		.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+		.affinity   = CPU_MASK_ALL
+#endif
+	}
+};
+
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+void __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int legacy_count;
+	int i;
+
+	desc = irq_desc_legacy;
+	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+	for (i = 0; i < legacy_count; i++) {
+		desc[i].irq = i;
+		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+
+		irq_desc_ptrs[i] = desc + i;
+	}
+
+	for (i = legacy_count; i < NR_IRQS; i++)
+		irq_desc_ptrs[i] = NULL;
+
+	arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
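
[ With sparse IRQs a descriptor may simply never have been allocated, so
  irq_to_desc() can return NULL and every caller has to cope. A sketch of
  the resulting caller pattern; some_irq_op() is a hypothetical stand-in
  for whatever the caller actually does with the descriptor:

	struct irq_desc *desc = irq_to_desc(irq);

	/* No descriptor was ever set up for this irq: nothing to do. */
	if (!desc)
		return;
	some_irq_op(desc);
]
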
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int node;
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+			irq, NR_IRQS);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		return desc;
+
+	spin_lock_irqsave(&sparse_irq_lock, flags);
+
+	/* We have to check it to avoid races with another CPU */
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		goto out_unlock;
+
+	node = cpu_to_node(cpu);
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
+		irq, cpu, node);
+	if (!desc) {
+		printk(KERN_ERR "can not alloc irq_desc\n");
+		BUG_ON(1);
+	}
+	init_one_irq_desc(irq, desc, cpu);
+
+	irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+	return desc;
+}
+
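
[ This is classic double-checked locking: the lockless read of
  irq_desc_ptrs[irq] serves the common already-allocated case, and the
  recheck under sparse_irq_lock closes the race where two CPUs try to
  instantiate the same descriptor at once. Since allocation failure is
  fatal (BUG_ON), a NULL return can only mean the irq number was out of
  range, as in this sketch of a hypothetical arch setup caller:

	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc)
		return -EINVAL;	/* only possible when irq >= NR_IRQS */
]
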
+#else
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -60,13 +218,17 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
+#endif
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
  */
 static void ack_bad(unsigned int irq)
 {
-	print_irq_desc(irq, irq_desc + irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	print_irq_desc(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -131,8 +293,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	handle_dynamic_tick(action);
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -165,19 +325,23 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
  */
 unsigned int __do_IRQ(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 	unsigned int status;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
+
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
 
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		if (desc->chip->ack)
+		if (desc->chip->ack) {
 			desc->chip->ack(irq);
+			/* get new one */
+			desc = irq_remap_to_desc(irq, desc);
+		}
 		if (likely(!(desc->status & IRQ_DISABLED))) {
 			action_ret = handle_IRQ_event(irq, desc->action);
 			if (!noirqdebug)
@@ -188,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
 	}
 
 	spin_lock(&desc->lock);
-	if (desc->chip->ack)
+	if (desc->chip->ack) {
 		desc->chip->ack(irq);
+		desc = irq_remap_to_desc(irq, desc);
+	}
 	/*
 	 * REPLAY is when Linux resends an IRQ that was dropped earlier
 	 * WAITING is used by probe to mark irqs that are being tested
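
[ Both ack sites now re-fetch the descriptor after calling into the chip:
  with interrupt remapping, acking a migrating interrupt is the moment at
  which its descriptor can be moved (for instance onto the destination
  CPU's node), so the old pointer may be stale afterwards. On
  configurations without remapping one would expect a pass-through; the
  following fallback is an assumption for illustration, not this patch's
  actual definition:

	/* Sketch: nothing moves without interrupt remapping, so the
	 * descriptor we already hold is still the right one. */
	static inline struct irq_desc *
	irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
	{
		return desc;
	}
]
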
@@ -256,19 +422,25 @@ out:
 }
 #endif
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 void early_init_irq_lock_class(void)
 {
+	struct irq_desc *desc;
 	int i;
 
-	for (i = 0; i < NR_IRQS; i++)
-		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+	for_each_irq_desc(i, desc) {
+		if (!desc)
+			continue;
+
+		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	}
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->kstat_irqs[cpu];
+}
 #endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
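
[ kstat_irqs_cpu() is now the accessor for the per-IRQ, per-CPU counts
  that used to be read straight out of the flat irqs[] array in struct
  kernel_stat. A sketch of how a /proc/interrupts-style reader would
  total an interrupt's count; the helper name irq_count() is
  illustrative:

	static unsigned int irq_count(unsigned int irq)
	{
		unsigned int sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += kstat_irqs_cpu(irq, cpu);

		return sum;
	}
]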