Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--	include/linux/interrupt.h	85
1 file changed, 65 insertions, 20 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..59b72ca1c5d1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -55,7 +57,8 @@
  * Used by threaded interrupts which need to keep the
  * irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -67,22 +70,10 @@
 #define IRQF_IRQPOLL		0x00001000
 #define IRQF_ONESHOT		0x00002000
 #define IRQF_NO_SUSPEND		0x00004000
+#define IRQF_FORCE_RESUME	0x00008000
+#define IRQF_NO_THREAD		0x00010000
 
-#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED - handler thread died
- * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY - irq thread is requested to adjust affinity
- */
-enum {
-	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
-	IRQTF_WARNED,
-	IRQTF_AFFINITY,
-};
+#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
 /*
  * These values can be returned by request_any_context_irq() and
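The two new bits above are ordinary request_irq() flags alongside the existing IRQF_* values. As a rough, hypothetical sketch of how a driver might pass them (the handler names, IRQ numbers and device strings below are illustrative and not part of this patch):

#include <linux/interrupt.h>

/* Sketch only: handlers, irq numbers and names are hypothetical. */
static irqreturn_t my_wake_handler(int irq, void *dev_id)
{
	/* acknowledge the (hypothetical) wakeup-capable device */
	return IRQ_HANDLED;
}

static int my_dev_setup_irqs(unsigned int wake_irq, unsigned int cascade_irq,
			     irq_handler_t cascade_handler, void *dev)
{
	int err;

	/* Keep this line enabled across suspend and force it back on at
	 * resume: IRQF_NO_SUSPEND | IRQF_FORCE_RESUME. */
	err = request_irq(wake_irq, my_wake_handler,
			  IRQF_NO_SUSPEND | IRQF_FORCE_RESUME,
			  "my-wake-dev", dev);
	if (err)
		return err;

	/* A demultiplexing handler that must never be pushed into a
	 * thread: IRQF_NO_THREAD. */
	return request_irq(cascade_irq, cascade_handler,
			   IRQF_NO_THREAD, "my-cascade", dev);
}

Both flags simply OR into the flags argument; no other driver-side change is required.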
@@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_fn:	interupt handler function for threaded interrupts
  * @thread:	thread pointer for threaded interrupts
  * @thread_flags:	flags related to @thread
+ * @thread_mask:	bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -120,6 +112,7 @@ struct irqaction {
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
+	unsigned long thread_mask;
 	const char *name;
 	struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
@@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:	Interrupt to which notification applies
+ * @kref:	Reference count, for internal use
+ * @work:	Work item, for internal use
+ * @notify:	Function to be called on change.  This will be
+ *		called in process context.
+ * @release:	Function to be called on release.  This will be
+ *		called in process context.  Once registered, the
+ *		structure must only be freed when this function is
+ *		called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
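The affinity-notification interface added above is callback based: a consumer embeds a struct irq_affinity_notify, fills in the two callbacks and registers it. A minimal, hypothetical sketch (the my_queue structure, function names and callback bodies are illustrative only; the patch itself provides just struct irq_affinity_notify and irq_set_affinity_notifier()):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/interrupt.h>

/* Hypothetical per-queue context embedding the notifier. */
struct my_queue {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

/* Called in process context whenever the IRQ's affinity mask changes. */
static void my_affinity_changed(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct my_queue *q = container_of(notify, struct my_queue,
					  affinity_notify);

	pr_info("irq %u affinity changed\n", q->irq);
	/* e.g. re-home per-queue data to the CPUs in @mask */
}

/* Called once the core drops its reference; the structure may only be
 * freed from this point on (nothing to free in this sketch). */
static void my_affinity_release(struct kref *ref)
{
}

static int my_queue_register_notifier(struct my_queue *q)
{
	q->affinity_notify.notify = my_affinity_changed;
	q->affinity_notify.release = my_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}

The kref and work members are managed by the IRQ core, which is why the kernel-doc marks them as internal; tearing the notifier down again before free_irq() is not shown here.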
@@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-			const struct cpumask *m)
+					 const struct cpumask *m)
 {
 	return -EINVAL;
 }
@@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
 }
 
 /* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_wake(unsigned int irq, unsigned int on)
+{
+	return irq_set_irq_wake(irq, on);
+}
+#endif
 
 static inline int enable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 1);
+	return irq_set_irq_wake(irq, 1);
 }
 
 static inline int disable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 0);
+	return irq_set_irq_wake(irq, 0);
 }
 
 #else /* !CONFIG_GENERIC_HARDIRQS */
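Driver-facing usage is unchanged by the rename above: enable_irq_wake()/disable_irq_wake() still work and now resolve to irq_set_irq_wake(), while the old set_irq_wake() name survives only as a deprecated compat wrapper. A hedged sketch of typical suspend/resume hooks (struct my_dev and the hook names are hypothetical):

#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_wakeup.h>

/* Hypothetical driver state. */
struct my_dev {
	int irq;
	bool wake_enabled;
};

static int my_dev_suspend(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	/* enable_irq_wake() ends up in irq_set_irq_wake(irq, 1). */
	if (device_may_wakeup(dev))
		md->wake_enabled = !enable_irq_wake(md->irq);
	return 0;
}

static int my_dev_resume(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	if (md->wake_enabled) {
		disable_irq_wake(md->irq);
		md->wake_enabled = false;
	}
	return 0;
}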
@@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads	(0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
@@ -426,6 +463,13 @@ extern void raise_softirq(unsigned int nr);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+	return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu. If this cannot be done, the
  * work will be queued to the local cpu.
  */
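The new per-CPU accessor above mainly serves accounting-style checks of the form "is this task the softirq thread of the current CPU?". A small hedged sketch (the helper name is hypothetical, only this_cpu_ksoftirqd() comes from the patch):

#include <linux/sched.h>
#include <linux/interrupt.h>

/* Hypothetical helper: true if @p is the ksoftirqd thread of the current CPU.
 * Callers need preemption disabled for the answer to remain meaningful. */
static inline bool my_task_is_local_ksoftirqd(struct task_struct *p)
{
	return p == this_cpu_ksoftirqd();
}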
@@ -645,6 +689,7 @@ static inline void init_irq_proc(void)
 
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
 
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);