author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-15 22:23:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-15 22:23:40 -0400
commit		5f6fb45466b2273ffb91c9cf209f164f666c33b1
tree		2b19f24b678ae379be1b19338c3095c1f76ed41d	/include/linux
parent		3904afb41d4316f7a2968c615d689e19149a4f84
parent		c0185808eb85139f45dbfd0de66963c498d0c4db
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (116 commits)
x86: Enable forced interrupt threading support
x86: Mark low level interrupts IRQF_NO_THREAD
x86: Use generic show_interrupts
x86: ioapic: Avoid redundant lookup of irq_cfg
x86: ioapic: Use new move_irq functions
x86: Use the proper accessors in fixup_irqs()
x86: ioapic: Use irq_data->state
x86: ioapic: Simplify irq chip and handler setup
x86: Cleanup the genirq name space
genirq: Add chip flag to force mask on suspend
genirq: Add desc->irq_data accessor
genirq: Add comments to Kconfig switches
genirq: Fixup fasteoi handler for oneshot mode
genirq: Provide forced interrupt threading
sched: Switch wait_task_inactive to schedule_hrtimeout()
genirq: Add IRQF_NO_THREAD
genirq: Allow shared oneshot interrupts
genirq: Prepare the handling of shared oneshot interrupts
genirq: Make warning in handle_percpu_event useful
x86: ioapic: Move trigger defines to io_apic.h
...
Fix up trivial(?) conflicts in arch/x86/pci/xen.c due to genirq name
space changes clashing with the Xen cleanups. The set_irq_msi() had
moved to xen_bind_pirq_msi_to_irq().
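
For illustration only (not part of the commit message): the clash came from the genirq accessor renames this series introduces, for which deprecated compat wrappers are kept for now. Driver-side code changes along the lines of the sketch below; the irq number, chip, and data pointers are placeholders.

	/* old genirq names, now deprecated compat wrappers */
	set_irq_chip_and_handler(irq, &demo_chip, handle_edge_irq);
	set_irq_chip_data(irq, priv);
	set_irq_msi(irq, msidesc);

	/* new names introduced by this series */
	irq_set_chip_and_handler(irq, &demo_chip, handle_edge_irq);
	irq_set_chip_data(irq, priv);
	irq_set_msi_desc(irq, msidesc);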
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/interrupt.h	 75
-rw-r--r--	include/linux/irq.h		368
-rw-r--r--	include/linux/irqdesc.h		 78
3 files changed, 427 insertions, 94 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2eb16e03422f..59b72ca1c5d1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -56,6 +58,7 @@
  * irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -68,22 +71,9 @@
 #define IRQF_ONESHOT		0x00002000
 #define IRQF_NO_SUSPEND		0x00004000
 #define IRQF_FORCE_RESUME	0x00008000
+#define IRQF_NO_THREAD		0x00010000
 
-#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED - handler thread died
- * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY - irq thread is requested to adjust affinity
- */
-enum {
-	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
-	IRQTF_WARNED,
-	IRQTF_AFFINITY,
-};
+#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
 /*
  * These values can be returned by request_any_context_irq() and
@@ -111,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_fn: interupt handler function for threaded interrupts
  * @thread: thread pointer for threaded interrupts
  * @thread_flags: flags related to @thread
+ * @thread_mask: bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -121,6 +112,7 @@ struct irqaction {
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
+	unsigned long thread_mask;
 	const char *name;
 	struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
@@ -241,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:	Interrupt to which notification applies
+ * @kref:	Reference count, for internal use
+ * @work:	Work item, for internal use
+ * @notify:	Function to be called on change.  This will be
+ *		called in process context.
+ * @release:	Function to be called on release.  This will be
+ *		called in process context.  Once registered, the
+ *		structure must only be freed when this function is
+ *		called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -256,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-					const struct cpumask *m)
+					const struct cpumask *m)
 {
 	return -EINVAL;
 }
@@ -315,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
 }
 
 /* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_wake(unsigned int irq, unsigned int on)
+{
+	return irq_set_irq_wake(irq, on);
+}
+#endif
 
 static inline int enable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 1);
+	return irq_set_irq_wake(irq, 1);
 }
 
 static inline int disable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 0);
+	return irq_set_irq_wake(irq, 0);
 }
 
 #else /* !CONFIG_GENERIC_HARDIRQS */
@@ -354,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads	(0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
@@ -653,6 +689,7 @@ static inline void init_irq_proc(void)
 
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
 
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
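
For illustration only (not part of the diff): a minimal sketch of how a driver might use the affinity-notification interface added to interrupt.h above. The names demo_* and the callback bodies are made up; irq, kref and work inside the structure are filled in by the core.

	static void demo_affinity_notify(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
	{
		/* called in process context after the affinity of notify->irq changed */
	}

	static void demo_affinity_release(struct kref *ref)
	{
		/* last reference dropped; the structure may only be freed from here on */
	}

	static struct irq_affinity_notify demo_notify = {
		.notify		= demo_affinity_notify,
		.release	= demo_affinity_release,
	};

	static int demo_setup(unsigned int demo_irq)
	{
		return irq_set_affinity_notifier(demo_irq, &demo_notify);
	}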
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 80fcb53057bc..1d3577f30d45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -29,61 +29,104 @@
 #include <asm/irq_regs.h>
 
 struct irq_desc;
+struct irq_data;
 typedef	void (*irq_flow_handler_t)(unsigned int irq,
 					    struct irq_desc *desc);
-
+typedef void (*irq_preflow_handler_t)(struct irq_data *data);
 
 /*
  * IRQ line status.
  *
- * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
+ * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
+ *
+ * IRQ_TYPE_NONE		- default, unspecified type
+ * IRQ_TYPE_EDGE_RISING		- rising edge triggered
+ * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
+ * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
+ * IRQ_TYPE_LEVEL_HIGH		- high level triggered
+ * IRQ_TYPE_LEVEL_LOW		- low level triggered
+ * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
+ * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
+ * IRQ_TYPE_PROBE		- Special flag for probing in progress
+ *
+ * Bits which can be modified via irq_set/clear/modify_status_flags()
+ * IRQ_LEVEL			- Interrupt is level type. Will be also
+ *				  updated in the code when the above trigger
+ *				  bits are modified via set_irq_type()
+ * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
+ *				  it from affinity setting
+ * IRQ_NOPROBE			- Interrupt cannot be probed by autoprobing
+ * IRQ_NOREQUEST		- Interrupt cannot be requested via
+ *				  request_irq()
+ * IRQ_NOAUTOEN			- Interrupt is not automatically enabled in
+ *				  request/setup_irq()
+ * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
+ * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
+ * IRQ_NESTED_TRHEAD		- Interrupt nests into another thread
+ *
+ * Deprecated bits. They are kept updated as long as
+ * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits
+ * are internal state of the core code and if you really need to acces
+ * them then talk to the genirq maintainer instead of hacking
+ * something weird.
  *
- * IRQ types
  */
-#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
-#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
-#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
-#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
-#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
-#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
-#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
-#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */
-
-/* Internal flags */
-#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
-#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
-#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
-#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
-#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
-#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
-#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
-#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
-#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request irq */
-#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
-#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
-#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
-#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
-#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
-#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
-#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */
-#define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
-#define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */
+enum {
+	IRQ_TYPE_NONE		= 0x00000000,
+	IRQ_TYPE_EDGE_RISING	= 0x00000001,
+	IRQ_TYPE_EDGE_FALLING	= 0x00000002,
+	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
+	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
+	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
+	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
+	IRQ_TYPE_SENSE_MASK	= 0x0000000f,
+
+	IRQ_TYPE_PROBE		= 0x00000010,
+
+	IRQ_LEVEL		= (1 <<  8),
+	IRQ_PER_CPU		= (1 <<  9),
+	IRQ_NOPROBE		= (1 << 10),
+	IRQ_NOREQUEST		= (1 << 11),
+	IRQ_NOAUTOEN		= (1 << 12),
+	IRQ_NO_BALANCING	= (1 << 13),
+	IRQ_MOVE_PCNTXT		= (1 << 14),
+	IRQ_NESTED_THREAD	= (1 << 15),
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+	IRQ_INPROGRESS		= (1 << 16),
+	IRQ_REPLAY		= (1 << 17),
+	IRQ_WAITING		= (1 << 18),
+	IRQ_DISABLED		= (1 << 19),
+	IRQ_PENDING		= (1 << 20),
+	IRQ_MASKED		= (1 << 21),
+	IRQ_MOVE_PENDING	= (1 << 22),
+	IRQ_AFFINITY_SET	= (1 << 23),
+	IRQ_WAKEUP		= (1 << 24),
+#endif
+};
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-	 IRQ_PER_CPU)
+	 IRQ_PER_CPU | IRQ_NESTED_THREAD)
 
-#ifdef CONFIG_IRQ_PER_CPU
-# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
-# define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
-#else
-# define CHECK_IRQ_PER_CPU(var) 0
-# define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
-#endif
+#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status)
+{
+	return status & IRQ_PER_CPU;
+}
+
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_data.affinity
+ */
+enum {
+	IRQ_SET_MASK_OK = 0,
+	IRQ_SET_MASK_OK_NOCOPY,
+};
 
 struct msi_desc;
 
@@ -91,6 +134,8 @@ struct msi_desc;
  * struct irq_data - per irq and irq chip data passed down to chip functions
  * @irq:		interrupt number
  * @node:		node index useful for balancing
+ * @state_use_accessor: status information for irq chip functions.
+ *			Use accessor functions to deal with it
  * @chip:		low level interrupt hardware access
  * @handler_data:	per-IRQ data for the irq_chip methods
  * @chip_data:		platform-specific per-chip private data for the chip
@@ -105,6 +150,7 @@ struct msi_desc;
 struct irq_data {
 	unsigned int		irq;
 	unsigned int		node;
+	unsigned int		state_use_accessors;
 	struct irq_chip		*chip;
 	void			*handler_data;
 	void			*chip_data;
@@ -114,6 +160,80 @@ struct irq_data {
 #endif
 };
 
+/*
+ * Bit masks for irq_data.state
+ *
+ * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
+ * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
+ * IRQD_PER_CPU			- Interrupt is per cpu
+ * IRQD_AFFINITY_SET		- Interrupt affinity was set
+ * IRQD_LEVEL			- Interrupt is level triggered
+ * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup
+ *				  from suspend
+ * IRDQ_MOVE_PCNTXT		- Interrupt can be moved in process
+ *				  context
+ */
+enum {
+	IRQD_TRIGGER_MASK		= 0xf,
+	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
+	IRQD_NO_BALANCING		= (1 << 10),
+	IRQD_PER_CPU			= (1 << 11),
+	IRQD_AFFINITY_SET		= (1 << 12),
+	IRQD_LEVEL			= (1 << 13),
+	IRQD_WAKEUP_STATE		= (1 << 14),
+	IRQD_MOVE_PCNTXT		= (1 << 15),
+};
+
+static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+}
+
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
+static inline u32 irqd_get_trigger_type(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_TRIGGER_MASK;
+}
+
+/*
+ * Must only be called inside irq_chip.irq_set_type() functions.
+ */
+static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
+{
+	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
+	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+}
+
+static inline bool irqd_is_level_type(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_LEVEL;
+}
+
+static inline bool irqd_is_wakeup_set(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_WAKEUP_STATE;
+}
+
+static inline bool irqd_can_move_in_process_context(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
@@ -150,6 +270,7 @@ struct irq_data {
  * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
  * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
  * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
+ * @flags:		chip specific flags
  *
  * @release:		release function solely used by UML
  */
@@ -196,12 +317,27 @@ struct irq_chip {
 	void		(*irq_bus_lock)(struct irq_data *data);
 	void		(*irq_bus_sync_unlock)(struct irq_data *data);
 
+	unsigned long	flags;
+
 	/* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	void		(*release)(unsigned int irq, void *dev_id);
 #endif
 };
 
+/*
+ * irq_chip specific flags
+ *
+ * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
+ */
+enum {
+	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
+	IRQCHIP_EOI_IF_HANDLED		= (1 << 1),
+	IRQCHIP_MASK_ON_SUSPEND		= (1 << 2),
+};
+
 /* This include will go away once we isolated irq_desc usage to core code */
 #include <linux/irqdesc.h>
 
@@ -218,7 +354,7 @@ struct irq_chip {
 # define ARCH_IRQ_INIT_FLAGS	0
 #endif
 
-#define IRQ_DEFAULT_INIT_FLAGS	(IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
+#define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS
 
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
@@ -229,9 +365,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
+void irq_move_irq(struct irq_data *data);
+void irq_move_masked_irq(struct irq_data *data);
 #else
 static inline void move_native_irq(int irq) { }
 static inline void move_masked_irq(int irq) { }
+static inline void irq_move_irq(struct irq_data *data) { }
+static inline void irq_move_masked_irq(struct irq_data *data) { }
 #endif
 
 extern int no_irq_affinity;
@@ -267,23 +407,23 @@ extern struct irq_chip no_irq_chip;
 extern struct irq_chip dummy_irq_chip;
 
 extern void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-			 irq_flow_handler_t handle);
-extern void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 			      irq_flow_handler_t handle, const char *name);
 
+static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+					    irq_flow_handler_t handle)
+{
+	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
+}
+
 extern void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
 
-/*
- * Set a highlevel flow handler for a given IRQ:
- */
 static inline void
-set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-	__set_irq_handler(irq, handle, 0, NULL);
+	__irq_set_handler(irq, handle, 0, NULL);
 }
 
 /*
@@ -292,14 +432,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
  * IRQ_NOREQUEST and IRQ_NOPROBE)
  */
 static inline void
-set_irq_chained_handler(unsigned int irq,
-			irq_flow_handler_t handle)
+irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-	__set_irq_handler(irq, handle, 1, NULL);
+	__irq_set_handler(irq, handle, 1, NULL);
 }
 
-extern void set_irq_nested_thread(unsigned int irq, int nest);
-
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
 
 static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -312,16 +449,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
 	irq_modify_status(irq, clr, 0);
 }
 
-static inline void set_irq_noprobe(unsigned int irq)
+static inline void irq_set_noprobe(unsigned int irq)
 {
 	irq_modify_status(irq, 0, IRQ_NOPROBE);
 }
 
-static inline void set_irq_probe(unsigned int irq)
+static inline void irq_set_probe(unsigned int irq)
 {
 	irq_modify_status(irq, IRQ_NOPROBE, 0);
 }
 
+static inline void irq_set_nested_thread(unsigned int irq, bool nest)
+{
+	if (nest)
+		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
+	else
+		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
@@ -338,14 +483,14 @@ static inline void dynamic_irq_init(unsigned int irq)
 }
 
 /* Set/get chip/data for an IRQ: */
-extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
-extern int set_irq_data(unsigned int irq, void *data);
-extern int set_irq_chip_data(unsigned int irq, void *data);
-extern int set_irq_type(unsigned int irq, unsigned int type);
-extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_handler_data(unsigned int irq, void *data);
+extern int irq_set_chip_data(unsigned int irq, void *data);
+extern int irq_set_irq_type(unsigned int irq, unsigned int type);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
 extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-static inline struct irq_chip *get_irq_chip(unsigned int irq)
+static inline struct irq_chip *irq_get_chip(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->chip : NULL;
@@ -356,7 +501,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
 	return d->chip;
 }
 
-static inline void *get_irq_chip_data(unsigned int irq)
+static inline void *irq_get_chip_data(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->chip_data : NULL;
@@ -367,18 +512,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
 	return d->chip_data;
 }
 
-static inline void *get_irq_data(unsigned int irq)
+static inline void *irq_get_handler_data(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->handler_data : NULL;
 }
 
-static inline void *irq_data_get_irq_data(struct irq_data *d)
+static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
 {
 	return d->handler_data;
 }
 
-static inline struct msi_desc *get_irq_msi(unsigned int irq)
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
 	return d ? d->msi_desc : NULL;
@@ -389,6 +534,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
 	return d->msi_desc;
 }
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip)
+{
+	return irq_set_chip(irq, chip);
+}
+static inline int set_irq_data(unsigned int irq, void *data)
+{
+	return irq_set_handler_data(irq, data);
+}
+static inline int set_irq_chip_data(unsigned int irq, void *data)
+{
+	return irq_set_chip_data(irq, data);
+}
+static inline int set_irq_type(unsigned int irq, unsigned int type)
+{
+	return irq_set_irq_type(irq, type);
+}
+static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+	return irq_set_msi_desc(irq, entry);
+}
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
+{
+	return irq_get_chip(irq);
+}
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+	return irq_get_chip_data(irq);
+}
+static inline void *get_irq_data(unsigned int irq)
+{
+	return irq_get_handler_data(irq);
+}
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+	return irq_data_get_irq_handler_data(d);
+}
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
+{
+	return irq_get_msi_desc(irq);
+}
+static inline void set_irq_noprobe(unsigned int irq)
+{
+	irq_set_noprobe(irq);
+}
+static inline void set_irq_probe(unsigned int irq)
+{
+	irq_set_probe(irq);
+}
+static inline void set_irq_nested_thread(unsigned int irq, int nest)
+{
+	irq_set_nested_thread(irq, nest);
+}
+static inline void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+			      irq_flow_handler_t handle, const char *name)
+{
+	irq_set_chip_and_handler_name(irq, chip, handle, name);
+}
+static inline void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+			 irq_flow_handler_t handle)
+{
+	irq_set_chip_and_handler(irq, chip, handle);
+}
+static inline void
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+		  const char *name)
+{
+	__irq_set_handler(irq, handle, is_chained, name);
+}
+static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+	irq_set_handler(irq, handle);
+}
+static inline void
+set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+	irq_set_chained_handler(irq, handle);
+}
+#endif
+
 int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
 void irq_free_descs(unsigned int irq, unsigned int cnt);
 int irq_reserve_irqs(unsigned int from, unsigned int cnt);
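
For illustration only (not part of the diff): a sketch of an irq_chip that uses the chip flags and irq_data state accessors added to irq.h above. The chip name, callbacks, and hardware programming are hypothetical.

	static int demo_chip_set_type(struct irq_data *d, unsigned int flow_type)
	{
		if (flow_type & ~IRQ_TYPE_SENSE_MASK)
			return -EINVAL;

		/* ... program the (hypothetical) trigger registers here ... */

		/* record the trigger; only legal inside ->irq_set_type() */
		irqd_set_trigger_type(d, flow_type);
		return 0;
	}

	static void demo_chip_mask(struct irq_data *d)
	{
		/* ... mask the line in the (hypothetical) hardware ... */
	}

	static void demo_chip_unmask(struct irq_data *d)
	{
		/* ... unmask the line ... */
	}

	static struct irq_chip demo_chip = {
		.name		= "demo",
		.irq_mask	= demo_chip_mask,
		.irq_unmask	= demo_chip_unmask,
		.irq_set_type	= demo_chip_set_type,
		/* core masks before irq_set_type() and masks non-wake irqs on suspend */
		.flags		= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND,
	};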
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c1a95b7b58de..00218371518b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@
  * For now it's included from <linux/irq.h>
  */
 
+struct irq_affinity_notify;
 struct proc_dir_entry;
 struct timer_rand_state;
 /**
@@ -18,13 +19,16 @@ struct timer_rand_state;
  * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
  * @action:		the irq action chain
  * @status:		status information
+ * @core_internal_state__do_not_mess_with_it: core internal status information
  * @depth:		disable-depth, for nested irq_disable() calls
  * @wake_depth:		enable depth, for multiple set_irq_wake() callers
  * @irq_count:		stats field to detect stalled irqs
  * @last_unhandled:	aging timer for unhandled count
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
+ * @affinity_notify:	context for notification of affinity changes
  * @pending_mask:	pending rebalanced interrupts
+ * @threads_oneshot:	bitfield to handle shared oneshot threads
  * @threads_active:	number of irqaction threads currently running
  * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
  * @dir:		/proc/irq/ procfs entry
@@ -45,6 +49,7 @@ struct irq_desc {
 		struct {
 			unsigned int		irq;
 			unsigned int		node;
+			unsigned int		pad_do_not_even_think_about_it;
 			struct irq_chip		*chip;
 			void			*handler_data;
 			void			*chip_data;
@@ -59,9 +64,16 @@ struct irq_desc {
 	struct timer_rand_state *timer_rand_state;
 	unsigned int		__percpu *kstat_irqs;
 	irq_flow_handler_t	handle_irq;
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+	irq_preflow_handler_t	preflow_handler;
+#endif
 	struct irqaction	*action;	/* IRQ action list */
+#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+	unsigned int		status_use_accessors;
+#else
 	unsigned int		status;		/* IRQ status */
-
+#endif
+	unsigned int		core_internal_state__do_not_mess_with_it;
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
 	unsigned int		irq_count;	/* For detecting broken IRQs */
@@ -70,10 +82,12 @@ struct irq_desc {
 	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
+	struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
 #endif
 #endif
+	unsigned long		threads_oneshot;
 	atomic_t		threads_active;
 	wait_queue_head_t	wait_for_threads;
 #ifdef CONFIG_PROC_FS
@@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#define get_irq_desc_chip(desc)		((desc)->irq_data.chip)
-#define get_irq_desc_chip_data(desc)	((desc)->irq_data.chip_data)
-#define get_irq_desc_data(desc)		((desc)->irq_data.handler_data)
-#define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)
+static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
+{
+	return &desc->irq_data;
+}
+
+static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
+{
+	return desc->irq_data.chip;
+}
+
+static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
+{
+	return desc->irq_data.chip_data;
+}
+
+static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
+{
+	return desc->irq_data.handler_data;
+}
+
+static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
+{
+	return desc->irq_data.msi_desc;
+}
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
+{
+	return irq_desc_get_chip(desc);
+}
+static inline void *get_irq_desc_data(struct irq_desc *desc)
+{
+	return irq_desc_get_handler_data(desc);
+}
+
+static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
+{
+	return irq_desc_get_chip_data(desc);
+}
+
+static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
+{
+	return irq_desc_get_msi_desc(desc);
+}
+#endif
 
 /*
  * Architectures call this to let the generic IRQ layer
@@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq)
 	return desc->action != NULL;
 }
 
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
 static inline int irq_balancing_disabled(unsigned int irq)
 {
 	struct irq_desc *desc;
@@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
 	desc = irq_to_desc(irq);
 	return desc->status & IRQ_NO_BALANCING_MASK;
 }
+#endif
 
 /* caller has locked the irq_desc and both params are valid */
 static inline void __set_irq_handler_unlocked(int irq,
@@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq,
 	desc = irq_to_desc(irq);
 	desc->handle_irq = handler;
 }
+
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void
+__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	desc->preflow_handler = handler;
+}
+#endif
 #endif
 
 #endif
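
For illustration only (not part of the diff): a chained flow handler written against the irq_desc accessors introduced in irqdesc.h above, instead of the old get_irq_desc_*() macros. The demux logic and all demo_* names are hypothetical.

	static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		void *priv = irq_desc_get_handler_data(desc);

		chip->irq_mask(&desc->irq_data);
		/*
		 * ... read a (hypothetical) status register through priv and
		 * call generic_handle_irq() for every pending child irq ...
		 */
		chip->irq_unmask(&desc->irq_data);
	}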