Diffstat (limited to 'include/linux/interrupt.h')
 include/linux/interrupt.h | 101 +++++++++++++++++++++++++++++++---------------
 1 file changed, 70 insertions(+), 31 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a0384a4d1e6f..f6efed0039ed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,10 +14,13 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -54,7 +57,8 @@
  * Used by threaded interrupts which need to keep the
  * irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -66,22 +70,10 @@
 #define IRQF_IRQPOLL		0x00001000
 #define IRQF_ONESHOT		0x00002000
 #define IRQF_NO_SUSPEND		0x00004000
+#define IRQF_FORCE_RESUME	0x00008000
+#define IRQF_NO_THREAD		0x00010000
 
-#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED - handler thread died
- * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY - irq thread is requested to adjust affinity
- */
-enum {
-	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
-	IRQTF_WARNED,
-	IRQTF_AFFINITY,
-};
+#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
 /*
  * These values can be returned by request_any_context_irq() and
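The two new flags round out the suspend/resume and threading controls: IRQF_FORCE_RESUME re-enables a line on resume even when IRQF_NO_SUSPEND kept it alive, and IRQF_NO_THREAD exempts a handler from forced threading, which is why IRQF_TIMER now carries it. A minimal sketch of a request that opts out of both suspend disabling and threading, as a timer-style source would (the "foo" handler, irq number, and device pointer are placeholders, not from this patch):

	static irqreturn_t foo_handler(int irq, void *dev_id)
	{
		/* acknowledge the hypothetical device */
		return IRQ_HANDLED;
	}

	/* keep the line enabled across suspend and never thread it */
	ret = request_irq(irq, foo_handler,
			  IRQF_NO_SUSPEND | IRQF_NO_THREAD, "foo", dev);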
@@ -106,22 +98,24 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @next:	pointer to the next irqaction for shared interrupts
  * @irq:	interrupt number
  * @dir:	pointer to the proc/irq/NN/name entry
- * @thread_fn:	interupt handler function for threaded interrupts
+ * @thread_fn:	interrupt handler function for threaded interrupts
  * @thread:	thread pointer for threaded interrupts
  * @thread_flags:	flags related to @thread
+ * @thread_mask:	bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
 	unsigned long flags;
-	const char *name;
 	void *dev_id;
 	struct irqaction *next;
 	int irq;
-	struct proc_dir_entry *dir;
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
-};
+	unsigned long thread_mask;
+	const char *name;
+	struct proc_dir_entry *dir;
+} ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
 
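struct irqaction gains @thread_mask and is now cache-line aligned, with the hot fields (handler, flags, dev_id) grouped at the front and the cold ones (name, dir) moved to the back. For reference, a threaded handler of the kind this structure describes is set up through request_threaded_irq(); this sketch uses hypothetical foo_* names:

	static irqreturn_t foo_hardirq(int irq, void *dev_id)
	{
		/* hard-irq context: just confirm it is our device */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
	{
		/* process context: do the heavy lifting */
		return IRQ_HANDLED;
	}

	/* IRQF_ONESHOT keeps the line masked until foo_thread_fn returns,
	 * the kind of state @thread_mask helps the core track */
	ret = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				   IRQF_ONESHOT, "foo", dev);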
@@ -239,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:	Interrupt to which notification applies
+ * @kref:	Reference count, for internal use
+ * @work:	Work item, for internal use
+ * @notify:	Function to be called on change.  This will be
+ *		called in process context.
+ * @release:	Function to be called on release.  This will be
+ *		called in process context.  Once registered, the
+ *		structure must only be freed when this function is
+ *		called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
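Per the kernel-doc above, @irq, @kref and @work are managed by the core; a user supplies only @notify and @release, and must not free the structure before @release fires. A plausible registration, with hypothetical foo_* callbacks:

	static void foo_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
	{
		/* called from the work item, process context */
	}

	static void foo_release(struct kref *ref)
	{
		struct irq_affinity_notify *n =
			container_of(ref, struct irq_affinity_notify, kref);
		/* the containing object may be freed from this point on */
	}

	static struct irq_affinity_notify foo_affinity = {
		.notify	 = foo_notify,
		.release = foo_release,
	};

	ret = irq_set_affinity_notifier(irq, &foo_affinity);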
@@ -254,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-                                        const struct cpumask *m)
+					const struct cpumask *m)
 {
 	return -EINVAL;
 }
@@ -313,16 +336,16 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
 }
 
 /* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
 
 static inline int enable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 1);
+	return irq_set_irq_wake(irq, 1);
 }
 
 static inline int disable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 0);
+	return irq_set_irq_wake(irq, 0);
 }
 
 #else /* !CONFIG_GENERIC_HARDIRQS */
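The rename to irq_set_irq_wake() follows the irq_set_* naming convention; the enable_irq_wake()/disable_irq_wake() wrappers keep their names, so drivers are unaffected. Typical driver usage in a suspend hook (the foo_* names and priv->irq field are hypothetical):

	static int foo_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			enable_irq_wake(priv->irq);
		return 0;
	}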
@@ -352,6 +375,13 @@ static inline int disable_irq_wake(unsigned int irq)
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads	(0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
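Defining force_irqthreads to a literal (0) when CONFIG_IRQ_FORCED_THREADING is off means any test against it is resolved at compile time. A sketch of the pattern (the surrounding code is hypothetical, not from this patch):

	if (force_irqthreads) {
		/* "threadirqs" boot parameter set: handlers are pushed
		 * into the irq thread */
	} else {
		/* constant 0 without CONFIG_IRQ_FORCED_THREADING, so the
		 * compiler drops the other branch entirely */
	}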
@@ -384,7 +414,7 @@ enum
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ,
-	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
+	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
 };
@@ -407,10 +437,14 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+static inline void __raise_softirq_irqoff(unsigned int nr)
+{
+	trace_softirq_raise(nr);
+	or_softirq_pending(1UL << nr);
+}
+
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
-extern void wakeup_softirqd(void);
 
 /* This is the worklist that queues up per-cpu softirq work.
  *
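Turning the macro into a static inline lets the softirq_raise tracepoint fire on every raise while producing the same pending-bit update as before. As the _irqoff suffix indicates, the caller must already have interrupts disabled; for example:

	unsigned long flags;

	local_irq_save(flags);
	__raise_softirq_irqoff(TASKLET_SOFTIRQ);	/* also hits the tracepoint */
	local_irq_restore(flags);

raise_softirq() remains the variant that handles the irq state itself.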
@@ -421,6 +455,13 @@ extern void wakeup_softirqd(void);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+	return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu. If this cannot be done, the
  * work will be queued to the local cpu.
  */
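Exposing the per-CPU ksoftirqd pointer lets callers cheaply ask whether the current task is this CPU's softirq thread, e.g. for accounting purposes. A one-line sketch (the helper name is hypothetical):

	/* true when running in ksoftirqd context on this CPU */
	static bool foo_in_ksoftirqd(void)
	{
		return this_cpu_ksoftirqd() == current;
	}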
@@ -443,7 +484,7 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
   Properties:
    * If tasklet_schedule() is called, then tasklet is guaranteed
      to be executed on some cpu at least once after this.
-   * If the tasklet is already scheduled, but its excecution is still not
+   * If the tasklet is already scheduled, but its execution is still not
      started, it will be executed only once.
    * If this tasklet is already running on another CPU (or schedule is called
      from tasklet itself), it is rescheduled for later.
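The properties listed above are the classic tasklet guarantees: serialized per tasklet, with schedules coalesced while one is already pending. A minimal user against this era's API (the foo_* names are hypothetical):

	static void foo_tasklet_fn(unsigned long data)
	{
		/* softirq context; never runs concurrently with itself */
	}

	static DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, 0);

	/* from the hard-irq handler: */
	tasklet_schedule(&foo_tasklet);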
@@ -640,12 +681,10 @@ static inline void init_irq_proc(void)
 
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
-
-struct irq_desc;
+int arch_show_interrupts(struct seq_file *p, int prec);
 
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int node);
 
 #endif