author		Thomas Gleixner <tglx@linutronix.de>	2010-10-01 10:03:45 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2010-10-12 10:39:04 -0400
commit		e144710b302525de5b90b9c3ba43562458d8957f (patch)
tree		0a6ef61ccb4957512ebf4a1887ba3bc54e78f99e /include/linux/irq.h
parent		fe21221386e46b8e0f2cbd83559a29680c28473b (diff)
genirq: Distangle irq.h
Move irq_desc and internal functions out of irq.h
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/irq.h')
-rw-r--r--	include/linux/irq.h	292
1 file changed, 13 insertions, 279 deletions
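The commit leaves include/linux/irq.h as the chip and flow-handler interface and moves struct irq_desc, its accessors, and the descriptor-based inline helpers into <linux/irqdesc.h>, which irq.h keeps including during the transition (see the comment added in the second hunk). Code that dereferences the descriptor would, after this change, look roughly like the sketch below; my_irq_in_use() is a hypothetical helper for illustration, not something the commit adds:

#include <linux/irq.h>		/* irq_chip and flow-handler interfaces */
#include <linux/irqdesc.h>	/* struct irq_desc, irq_to_desc(), ... */

/* Hypothetical core-code helper; the explicit irqdesc.h include is what
 * the transitional include inside irq.h makes optional for now. */
static int my_irq_in_use(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action != NULL;
}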
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 82ed8231394a..f5827abbc034 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -80,7 +80,6 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
 #endif
 
-struct proc_dir_entry;
 struct msi_desc;
 
 /**
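The dropped forward declaration of struct proc_dir_entry was only needed for the descriptor's ->dir member, which leaves this header in the next hunk. Forward declarations like it are the usual way to keep a header dependency-free when only pointers to a type appear; a minimal standalone illustration (all names made up):

struct opaque;			/* forward declaration, no #include needed */

struct user {
	struct opaque *handle;	/* a pointer member needs no size info */
};

/* A "struct opaque o;" member here would fail: the type is incomplete. */

int main(void)
{
	struct user u = { 0 };
	return u.handle != 0;	/* 0: the pointer starts out null */
}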
@@ -202,152 +201,36 @@ struct irq_chip {
 #endif
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
-/**
- * struct irq_desc - interrupt descriptor
- * @irq_data:		per irq and chip data passed down to chip functions
- * @timer_rand_state:	pointer to timer rand state struct
- * @kstat_irqs:		irq stats per cpu
- * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
- * @action:		the irq action chain
- * @status:		status information
- * @depth:		disable-depth, for nested irq_disable() calls
- * @wake_depth:		enable depth, for multiple set_irq_wake() callers
- * @irq_count:		stats field to detect stalled irqs
- * @last_unhandled:	aging timer for unhandled count
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
- * @lock:		locking for SMP
- * @pending_mask:	pending rebalanced interrupts
- * @threads_active:	number of irqaction threads currently running
- * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
- * @dir:		/proc/irq/ procfs entry
- * @name:		flow handler name for /proc/interrupts output
- */
-struct irq_desc {
-
-#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-	struct irq_data		irq_data;
-#else
-	/*
-	 * This union will go away, once we fixed the direct access to
-	 * irq_desc all over the place. The direct fields are a 1:1
-	 * overlay of irq_data.
-	 */
-	union {
-		struct irq_data		irq_data;
-		struct {
-			unsigned int		irq;
-			unsigned int		node;
-			struct irq_chip		*chip;
-			void			*handler_data;
-			void			*chip_data;
-			struct msi_desc		*msi_desc;
-#ifdef CONFIG_SMP
-			cpumask_var_t		affinity;
-#endif
-#ifdef CONFIG_INTR_REMAP
-			struct irq_2_iommu	*irq_2_iommu;
-#endif
-		};
-	};
-#endif
-
-	struct timer_rand_state *timer_rand_state;
-	unsigned int		*kstat_irqs;
-	irq_flow_handler_t	handle_irq;
-	struct irqaction	*action;	/* IRQ action list */
-	unsigned int		status;		/* IRQ status */
-
-	unsigned int		depth;		/* nested irq disables */
-	unsigned int		wake_depth;	/* nested wake enables */
-	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
-	unsigned int		irqs_unhandled;
-	raw_spinlock_t		lock;
-#ifdef CONFIG_SMP
-	const struct cpumask	*affinity_hint;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_var_t		pending_mask;
-#endif
-#endif
-	atomic_t		threads_active;
-	wait_queue_head_t	wait_for_threads;
-#ifdef CONFIG_PROC_FS
-	struct proc_dir_entry	*dir;
-#endif
-	const char		*name;
-} ____cacheline_internodealigned_in_smp;
-
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-					struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
-
-#ifndef CONFIG_SPARSE_IRQ
-extern struct irq_desc irq_desc[NR_IRQS];
-#endif
-
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
-static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	return desc;
-}
-#endif
-
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
 
 /*
  * Pick up the arch-dependent methods:
  */
 #include <asm/hw_irq.h>
 
+struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
 #ifdef CONFIG_SMP
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-
+# ifdef CONFIG_GENERIC_PENDING_IRQ
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
-
-#else /* CONFIG_GENERIC_PENDING_IRQ */
-
-static inline void move_irq(int irq)
-{
-}
-
-static inline void move_native_irq(int irq)
-{
-}
-
-static inline void move_masked_irq(int irq)
-{
-}
-
-#endif /* CONFIG_GENERIC_PENDING_IRQ */
-
-#else /* CONFIG_SMP */
-
-#define move_native_irq(x)
-#define move_masked_irq(x)
-
-#endif /* CONFIG_SMP */
+# else
+static inline void move_irq(int irq) { }
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+# endif
+#else
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+#endif
 
 extern int no_irq_affinity;
 
-static inline int irq_balancing_disabled(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	return desc->status & IRQ_NO_BALANCING_MASK;
-}
-
 /* Handle irq action chains: */
 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 
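Aside from relocating the descriptor, the hunk above flattens the move_*_irq() fallbacks: the old #define move_native_irq(x) style no-op macros for the !CONFIG_SMP case become empty static inlines. The commit message does not spell out why, but the usual kernel rationale is that an inline stub still type-checks its argument while compiling to nothing; a standalone illustration (names invented for the demo):

/* Old style: the macro discards its argument without looking at it. */
#define move_native_irq_old(x)

/* New style: same zero cost, but the argument must be a valid
 * expression of the right type. */
static inline void move_native_irq_new(int irq) { }

int main(void)
{
	move_native_irq_old(not_even_declared);	/* expands to nothing, compiles */
	move_native_irq_new(7);			/* type-checked call */
	/* move_native_irq_new(not_even_declared); would be a compile error */
	return 0;
}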
@@ -363,42 +246,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
-/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
-}
-
-static inline void generic_handle_irq(unsigned int irq)
-{
-	generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
-
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		irqreturn_t action_ret);
 
-/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
 /* Enable/disable irq debugging output: */
 extern int noirqdebug_setup(char *str);
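generic_handle_irq_desc() and generic_handle_irq(), removed above, move to <linux/irqdesc.h> rather than disappearing; they dereference struct irq_desc, which is exactly what this commit isolates. The removed comment describes their job: dispatch to the descriptor's flow handler if one is installed, else fall back to the legacy __do_IRQ() path. A standalone sketch of that dispatch shape, with stand-in types rather than the kernel's:

#include <stdio.h>

struct demo_desc {
	/* stands in for irq_desc::handle_irq */
	void (*handle_irq)(unsigned int irq, struct demo_desc *desc);
};

static void legacy_do_irq(unsigned int irq)	/* stands in for __do_IRQ() */
{
	printf("irq %u: legacy __do_IRQ-style path\n", irq);
}

/* Mirrors the removed generic_handle_irq_desc() in its
 * !CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ form. */
static void demo_handle_irq_desc(unsigned int irq, struct demo_desc *desc)
{
	if (desc->handle_irq)
		desc->handle_irq(irq, desc);	/* irqchip-style flow handler */
	else
		legacy_do_irq(irq);		/* irqtype-style fallback */
}

static void edge_handler(unsigned int irq, struct demo_desc *desc)
{
	printf("irq %u: flow handler\n", irq);
}

int main(void)
{
	struct demo_desc chip_style = { .handle_irq = edge_handler };
	struct demo_desc legacy_style = { 0 };

	demo_handle_irq_desc(1, &chip_style);	/* flow-handler path */
	demo_handle_irq_desc(2, &legacy_style);	/* legacy fallback */
	return 0;
}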
@@ -421,16 +272,6 @@ extern void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __set_irq_handler_unlocked(int irq,
-					      irq_flow_handler_t handler)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	desc->handle_irq = handler;
-}
-
 /*
  * Set a highlevel flow handler for a given IRQ:
  */
@@ -462,13 +303,6 @@ extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
-/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
-}
-
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
 void dynamic_irq_init_keep_chip_data(unsigned int irq);
@@ -487,108 +321,8 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)	(irq_to_desc(irq)->irq_data.handler_data)
 #define get_irq_msi(irq)	(irq_to_desc(irq)->irq_data.msi_desc)
 
-#define get_irq_desc_chip(desc)		((desc)->irq_data.chip)
-#define get_irq_desc_chip_data(desc)	((desc)->irq_data.chip_data)
-#define get_irq_desc_data(desc)		((desc)->irq_data.handler_data)
-#define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)
-
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
 
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc:	pointer to irq_desc struct
- * @node:	node which will be handling the cpumasks
- * @boot:	true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-							bool boot)
-{
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
-		return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-	cpumask_setall(desc->irq_data.affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc:	pointer to old irq_desc struct
- * @new_desc:	pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-	free_cpumask_var(old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-							bool boot)
-{
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-}
-#endif /* CONFIG_SMP */
-
 #endif /* _LINUX_IRQ_H */
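One pattern worth noting in the relocated alloc_desc_masks() above: when the second cpumask allocation fails, the first is freed before returning false, so the caller never inherits a half-initialized descriptor. The same allocate-with-rollback shape in a standalone sketch, with plain calloc() standing in for alloc_cpumask_var_node():

#include <stdbool.h>
#include <stdlib.h>

struct two_masks {
	unsigned long *affinity;
	unsigned long *pending;
};

/* Either both allocations succeed, or neither is left behind. */
static bool alloc_two_masks(struct two_masks *m, size_t words)
{
	m->affinity = calloc(words, sizeof(*m->affinity));
	if (!m->affinity)
		return false;

	m->pending = calloc(words, sizeof(*m->pending));
	if (!m->pending) {
		free(m->affinity);	/* roll back the first allocation */
		m->affinity = NULL;
		return false;
	}
	return true;
}

int main(void)
{
	struct two_masks m;

	return alloc_two_masks(&m, 4) ? 0 : 1;
}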