-rw-r--r--  include/linux/irq.h     | 292
-rw-r--r--  include/linux/irqdesc.h | 171
-rw-r--r--  kernel/irq/internals.h  | 100
3 files changed, 284 insertions(+), 279 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 82ed8231394a..f5827abbc034 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -80,7 +80,6 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
 #endif
 
-struct proc_dir_entry;
 struct msi_desc;
 
 /**
@@ -202,152 +201,36 @@ struct irq_chip {
 #endif
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
-/**
- * struct irq_desc - interrupt descriptor
- * @irq_data:		per irq and chip data passed down to chip functions
- * @timer_rand_state:	pointer to timer rand state struct
- * @kstat_irqs:		irq stats per cpu
- * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
- * @action:		the irq action chain
- * @status:		status information
- * @depth:		disable-depth, for nested irq_disable() calls
- * @wake_depth:		enable depth, for multiple set_irq_wake() callers
- * @irq_count:		stats field to detect stalled irqs
- * @last_unhandled:	aging timer for unhandled count
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
- * @lock:		locking for SMP
- * @pending_mask:	pending rebalanced interrupts
- * @threads_active:	number of irqaction threads currently running
- * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
- * @dir:		/proc/irq/ procfs entry
- * @name:		flow handler name for /proc/interrupts output
- */
-struct irq_desc {
-
-#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-	struct irq_data		irq_data;
-#else
-	/*
-	 * This union will go away, once we fixed the direct access to
-	 * irq_desc all over the place. The direct fields are a 1:1
-	 * overlay of irq_data.
-	 */
-	union {
-		struct irq_data		irq_data;
-		struct {
-			unsigned int		irq;
-			unsigned int		node;
-			struct irq_chip		*chip;
-			void			*handler_data;
-			void			*chip_data;
-			struct msi_desc		*msi_desc;
-#ifdef CONFIG_SMP
-			cpumask_var_t		affinity;
-#endif
-#ifdef CONFIG_INTR_REMAP
-			struct irq_2_iommu	*irq_2_iommu;
-#endif
-		};
-	};
-#endif
-
-	struct timer_rand_state *timer_rand_state;
-	unsigned int		*kstat_irqs;
-	irq_flow_handler_t	handle_irq;
-	struct irqaction	*action;	/* IRQ action list */
-	unsigned int		status;		/* IRQ status */
-
-	unsigned int		depth;		/* nested irq disables */
-	unsigned int		wake_depth;	/* nested wake enables */
-	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
-	unsigned int		irqs_unhandled;
-	raw_spinlock_t		lock;
-#ifdef CONFIG_SMP
-	const struct cpumask	*affinity_hint;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_var_t		pending_mask;
-#endif
-#endif
-	atomic_t		threads_active;
-	wait_queue_head_t	wait_for_threads;
-#ifdef CONFIG_PROC_FS
-	struct proc_dir_entry	*dir;
-#endif
-	const char		*name;
-} ____cacheline_internodealigned_in_smp;
-
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-				     struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
-
-#ifndef CONFIG_SPARSE_IRQ
-extern struct irq_desc irq_desc[NR_IRQS];
-#endif
-
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
-static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	return desc;
-}
-#endif
-
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
 
 /*
  * Pick up the arch-dependent methods:
  */
 #include <asm/hw_irq.h>
 
+struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
 #ifdef CONFIG_SMP
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-
+# ifdef CONFIG_GENERIC_PENDING_IRQ
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
-
-#else /* CONFIG_GENERIC_PENDING_IRQ */
-
-static inline void move_irq(int irq)
-{
-}
-
-static inline void move_native_irq(int irq)
-{
-}
-
-static inline void move_masked_irq(int irq)
-{
-}
-
-#endif /* CONFIG_GENERIC_PENDING_IRQ */
-
-#else /* CONFIG_SMP */
-
-#define move_native_irq(x)
-#define move_masked_irq(x)
-
-#endif /* CONFIG_SMP */
+# else
+static inline void move_irq(int irq) { }
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+# endif
+#else
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+#endif
 
 extern int no_irq_affinity;
 
-static inline int irq_balancing_disabled(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	return desc->status & IRQ_NO_BALANCING_MASK;
-}
-
 /* Handle irq action chains: */
 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 
@@ -363,42 +246,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
-/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
-}
-
-static inline void generic_handle_irq(unsigned int irq)
-{
-	generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
-
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 			   irqreturn_t action_ret);
 
-/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
-
 /* Enable/disable irq debugging output: */
 extern int noirqdebug_setup(char *str);
@@ -421,16 +272,6 @@ extern void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __set_irq_handler_unlocked(int irq,
-					      irq_flow_handler_t handler)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	desc->handle_irq = handler;
-}
-
 /*
  * Set a highlevel flow handler for a given IRQ:
  */
@@ -462,13 +303,6 @@ extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
-/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
-}
-
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
 void dynamic_irq_init_keep_chip_data(unsigned int irq);
@@ -487,108 +321,8 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)	(irq_to_desc(irq)->irq_data.handler_data)
 #define get_irq_msi(irq)	(irq_to_desc(irq)->irq_data.msi_desc)
 
-#define get_irq_desc_chip(desc)		((desc)->irq_data.chip)
-#define get_irq_desc_chip_data(desc)	((desc)->irq_data.chip_data)
-#define get_irq_desc_data(desc)		((desc)->irq_data.handler_data)
-#define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)
-
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
 
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc:	pointer to irq_desc struct
- * @node:	node which will be handling the cpumasks
- * @boot:	true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
-		return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-	cpumask_setall(desc->irq_data.affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc:	pointer to old irq_desc struct
- * @new_desc:	pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-	free_cpumask_var(old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-}
-#endif	/* CONFIG_SMP */
-
 #endif /* _LINUX_IRQ_H */
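
The transitional anchor in the hunks above is the new `#include <linux/irqdesc.h>` in irq.h: everything that moved out (struct irq_desc, generic_handle_irq(), irq_has_action(), the get_irq_desc_*() accessors) stays visible to existing users with no include changes. As a reminder of where the relocated generic_handle_irq() typically gets called, here is a minimal sketch of a cascade demultiplexing flow handler; the pending-register helper and the MY_* constants are hypothetical placeholders, not kernel APIs.

/*
 * Hypothetical cascade demux handler; only illustrates the call into the
 * generic IRQ layer. pending_reg_read(), MY_IRQ_BASE and MY_NR_IRQS are
 * made-up stand-ins for some real device access.
 */
#include <linux/irq.h>		/* still sufficient: it pulls in <linux/irqdesc.h> */
#include <linux/bitops.h>

#define MY_IRQ_BASE	64	/* assumed base of the child irq range */
#define MY_NR_IRQS	8	/* assumed number of cascaded sources */

extern unsigned long pending_reg_read(void);	/* hypothetical device read */

static void my_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = pending_reg_read();
	int bit;

	/* Hand each pending child interrupt to the core flow handler. */
	for_each_set_bit(bit, &pending, MY_NR_IRQS)
		generic_handle_irq(MY_IRQ_BASE + bit);
}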
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
new file mode 100644
index 000000000000..22e426fdd301
--- /dev/null
+++ b/include/linux/irqdesc.h
@@ -0,0 +1,171 @@
+#ifndef _LINUX_IRQDESC_H
+#define _LINUX_IRQDESC_H
+
+/*
+ * Core internal functions to deal with irq descriptors
+ *
+ * This include will move to kernel/irq once we cleaned up the tree.
+ * For now it's included from <linux/irq.h>
+ */
+
+struct proc_dir_entry;
+struct timer_rand_state;
+struct irq_2_iommu;
+/**
+ * struct irq_desc - interrupt descriptor
+ * @irq_data:		per irq and chip data passed down to chip functions
+ * @timer_rand_state:	pointer to timer rand state struct
+ * @kstat_irqs:		irq stats per cpu
+ * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
+ * @action:		the irq action chain
+ * @status:		status information
+ * @depth:		disable-depth, for nested irq_disable() calls
+ * @wake_depth:		enable depth, for multiple set_irq_wake() callers
+ * @irq_count:		stats field to detect stalled irqs
+ * @last_unhandled:	aging timer for unhandled count
+ * @irqs_unhandled:	stats field for spurious unhandled interrupts
+ * @lock:		locking for SMP
+ * @pending_mask:	pending rebalanced interrupts
+ * @threads_active:	number of irqaction threads currently running
+ * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
+ * @dir:		/proc/irq/ procfs entry
+ * @name:		flow handler name for /proc/interrupts output
+ */
+struct irq_desc {
+
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+	struct irq_data		irq_data;
+#else
+	/*
+	 * This union will go away, once we fixed the direct access to
+	 * irq_desc all over the place. The direct fields are a 1:1
+	 * overlay of irq_data.
+	 */
+	union {
+		struct irq_data		irq_data;
+		struct {
+			unsigned int		irq;
+			unsigned int		node;
+			struct irq_chip		*chip;
+			void			*handler_data;
+			void			*chip_data;
+			struct msi_desc		*msi_desc;
+#ifdef CONFIG_SMP
+			cpumask_var_t		affinity;
+#endif
+#ifdef CONFIG_INTR_REMAP
+			struct irq_2_iommu	*irq_2_iommu;
+#endif
+		};
+	};
+#endif
+
+	struct timer_rand_state *timer_rand_state;
+	unsigned int		*kstat_irqs;
+	irq_flow_handler_t	handle_irq;
+	struct irqaction	*action;	/* IRQ action list */
+	unsigned int		status;		/* IRQ status */
+
+	unsigned int		depth;		/* nested irq disables */
+	unsigned int		wake_depth;	/* nested wake enables */
+	unsigned int		irq_count;	/* For detecting broken IRQs */
+	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
+	unsigned int		irqs_unhandled;
+	raw_spinlock_t		lock;
+#ifdef CONFIG_SMP
+	const struct cpumask	*affinity_hint;
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_var_t		pending_mask;
+#endif
+#endif
+	atomic_t		threads_active;
+	wait_queue_head_t	wait_for_threads;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry	*dir;
+#endif
+	const char		*name;
+} ____cacheline_internodealigned_in_smp;
+
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+				     struct irq_desc *desc, int node);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+
+#ifndef CONFIG_SPARSE_IRQ
+extern struct irq_desc irq_desc[NR_IRQS];
+#endif
+
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
+#else
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
+	return desc;
+}
+#endif
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+#define get_irq_desc_chip(desc)		((desc)->irq_data.chip)
+#define get_irq_desc_chip_data(desc)	((desc)->irq_data.chip_data)
+#define get_irq_desc_data(desc)		((desc)->irq_data.handler_data)
+#define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)
+
+/*
+ * Monolithic do_IRQ implementation.
+ */
+#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+extern unsigned int __do_IRQ(unsigned int irq);
+#endif
+
+/*
+ * Architectures call this to let the generic IRQ layer
+ * handle an interrupt. If the descriptor is attached to an
+ * irqchip-style controller then we call the ->handle_irq() handler,
+ * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ */
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+	desc->handle_irq(irq, desc);
+#else
+	if (likely(desc->handle_irq))
+		desc->handle_irq(irq, desc);
+	else
+		__do_IRQ(irq);
+#endif
+}
+
+static inline void generic_handle_irq(unsigned int irq)
+{
+	generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
+/* Test to see if a driver has successfully requested an irq */
+static inline int irq_has_action(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->action != NULL;
+}
+
+static inline int irq_balancing_disabled(unsigned int irq)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	return desc->status & IRQ_NO_BALANCING_MASK;
+}
+
+/* caller has locked the irq_desc and both params are valid */
+static inline void __set_irq_handler_unlocked(int irq,
+					      irq_flow_handler_t handler)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->handle_irq = handler;
+}
+#endif
+
+#endif
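
The get_irq_desc_*() macros above are the intended replacement for poking the transitional union fields directly. A hedged sketch of a flow handler written against them follows, loosely shaped like a stripped-down handle_level_irq(); it assumes the old-style irq_chip callbacks (mask()/unmask() taking an irq number) that were still present in this transition period, and it omits the status bookkeeping the real handler does.

/* Sketch only: simplified level-type flow handler using the accessors. */
static void my_handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	/* Reach the chip via irq_data instead of the direct desc->chip field. */
	struct irq_chip *chip = get_irq_desc_chip(desc);

	raw_spin_lock(&desc->lock);
	chip->mask(irq);		/* keep the line quiet while handling */
	raw_spin_unlock(&desc->lock);

	/* Run the action chain; declared in <linux/irq.h>. */
	handle_IRQ_event(irq, desc->action);

	raw_spin_lock(&desc->lock);
	chip->unmask(irq);		/* re-enable delivery */
	raw_spin_unlock(&desc->lock);
}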
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b905f0ab1bb2..e281e45fbb55 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -1,6 +1,7 @@
 /*
  * IRQ subsystem internal functions and variables:
  */
+#include <linux/irqdesc.h>
 
 extern int noirqdebug;
 
@@ -22,6 +23,9 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern raw_spinlock_t sparse_irq_lock;
 
+/* Resending of interrupts :*/
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+
 #ifdef CONFIG_SPARSE_IRQ
 void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
 #endif
@@ -105,3 +109,99 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 
 #undef P
 
+/* Stuff below will be cleaned up after the sparse allocator is done */
+
+#ifdef CONFIG_SMP
+/**
+ * alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @node:	node which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ */
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+				    bool boot)
+{
+	gfp_t gfp = GFP_ATOMIC;
+
+	if (boot)
+		gfp = GFP_NOWAIT;
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+		return false;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+		free_cpumask_var(desc->irq_data.affinity);
+		return false;
+	}
+#endif
+#endif
+	return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+	cpumask_setall(desc->irq_data.affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Insures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+				   struct irq_desc *new_desc)
+{
+	free_cpumask_var(old_desc->irq_data.affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	free_cpumask_var(old_desc->pending_mask);
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+				    bool boot)
+{
+	return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+				   struct irq_desc *new_desc)
+{
+}
+#endif	/* CONFIG_SMP */
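
These cpumask helpers land in kernel/irq/internals.h because only the descriptor allocator needs them, as the new "cleaned up after the sparse allocator" comment says. A rough sketch of the init-time sequence they serve, loosely modeled on the descriptor allocation path; the wrapper name is hypothetical, and the real code adds locking and error unwinding.

/* Inside kernel/irq/, after #include "internals.h". */
static bool my_setup_desc_masks(struct irq_desc *desc, int node)
{
	/*
	 * With CONFIG_CPUMASK_OFFSTACK the affinity and pending_mask
	 * cpumasks are real allocations; otherwise alloc_desc_masks()
	 * is a no-op that just returns true.
	 */
	if (!alloc_desc_masks(desc, node, false))
		return false;

	/* Default policy: any CPU allowed, no migration pending. */
	init_desc_masks(desc);
	return true;
}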