-rw-r--r-- | Documentation/kernel-parameters.txt |   4
-rw-r--r-- | include/linux/interrupt.h           |  78
-rw-r--r-- | include/linux/irq.h                 | 368
-rw-r--r-- | include/linux/irqdesc.h             |  78
-rw-r--r-- | kernel/irq/Kconfig                  |  39
-rw-r--r-- | kernel/irq/autoprobe.c              |  54
-rw-r--r-- | kernel/irq/chip.c                   | 483
-rw-r--r-- | kernel/irq/compat.h                 |  72
-rw-r--r-- | kernel/irq/debug.h                  |  40
-rw-r--r-- | kernel/irq/handle.c                 | 144
-rw-r--r-- | kernel/irq/internals.h              | 173
-rw-r--r-- | kernel/irq/irqdesc.c                |  79
-rw-r--r-- | kernel/irq/manage.c                 | 604
-rw-r--r-- | kernel/irq/migration.c              |  38
-rw-r--r-- | kernel/irq/pm.c                     |  30
-rw-r--r-- | kernel/irq/proc.c                   |  70
-rw-r--r-- | kernel/irq/resend.c                 |  19
-rw-r--r-- | kernel/irq/settings.h               | 138
-rw-r--r-- | kernel/irq/spurious.c               | 163
-rw-r--r-- | kernel/sched.c                      |   5
-rw-r--r-- | kernel/softirq.c                    |  21
21 files changed, 1887 insertions, 813 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f4a04c0c7edc..738c6fda3fb0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2444,6 +2444,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
2444 | <deci-seconds>: poll all this frequency | 2444 | <deci-seconds>: poll all this frequency |
2445 | 0: no polling (default) | 2445 | 0: no polling (default) |
2446 | 2446 | ||
2447 | threadirqs [KNL] | ||
2448 | Force threading of all interrupt handlers except those | ||
2449 | marked explicitly IRQF_NO_THREAD. | ||
2450 | |||
2447 | topology= [S390] | 2451 | topology= [S390] |
2448 | Format: {off | on} | 2452 | Format: {off | on} |
2449 | Specify if the kernel should make use of the cpu | 2453 | Specify if the kernel should make use of the cpu |
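As a usage sketch only (not part of this patch; the foo_* names are hypothetical): a handler that has to stay in hard interrupt context even when the kernel is booted with threadirqs would pass the new IRQF_NO_THREAD flag to request_irq(), for example:

#include <linux/interrupt.h>

/* hypothetical handler that must not be moved into a thread */
static irqreturn_t foo_timer_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_setup_irq(unsigned int irq, void *dev)
{
        /* excluded from forced threading even with "threadirqs" on the command line */
        return request_irq(irq, foo_timer_isr, IRQF_NO_THREAD, "foo-timer", dev);
}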
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..f8a8af108e0c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | #include <linux/percpu.h> | 15 | #include <linux/percpu.h> |
16 | #include <linux/hrtimer.h> | 16 | #include <linux/hrtimer.h> |
17 | #include <linux/kref.h> | ||
18 | #include <linux/workqueue.h> | ||
17 | 19 | ||
18 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
19 | #include <asm/ptrace.h> | 21 | #include <asm/ptrace.h> |
@@ -55,7 +57,8 @@ | |||
55 | * Used by threaded interrupts which need to keep the | 57 | * Used by threaded interrupts which need to keep the |
56 | * irq line disabled until the threaded handler has been run. | 58 | * irq line disabled until the threaded handler has been run. |
57 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 59 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend |
58 | * | 60 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
61 | * IRQF_NO_THREAD - Interrupt cannot be threaded | ||
59 | */ | 62 | */ |
60 | #define IRQF_DISABLED 0x00000020 | 63 | #define IRQF_DISABLED 0x00000020 |
61 | #define IRQF_SAMPLE_RANDOM 0x00000040 | 64 | #define IRQF_SAMPLE_RANDOM 0x00000040 |
@@ -67,22 +70,10 @@ | |||
67 | #define IRQF_IRQPOLL 0x00001000 | 70 | #define IRQF_IRQPOLL 0x00001000 |
68 | #define IRQF_ONESHOT 0x00002000 | 71 | #define IRQF_ONESHOT 0x00002000 |
69 | #define IRQF_NO_SUSPEND 0x00004000 | 72 | #define IRQF_NO_SUSPEND 0x00004000 |
73 | #define IRQF_FORCE_RESUME 0x00008000 | ||
74 | #define IRQF_NO_THREAD 0x00010000 | ||
70 | 75 | ||
71 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) | 76 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
72 | |||
73 | /* | ||
74 | * Bits used by threaded handlers: | ||
75 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | ||
76 | * IRQTF_DIED - handler thread died | ||
77 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | ||
78 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | ||
79 | */ | ||
80 | enum { | ||
81 | IRQTF_RUNTHREAD, | ||
82 | IRQTF_DIED, | ||
83 | IRQTF_WARNED, | ||
84 | IRQTF_AFFINITY, | ||
85 | }; | ||
86 | 77 | ||
87 | /* | 78 | /* |
88 | * These values can be returned by request_any_context_irq() and | 79 | * These values can be returned by request_any_context_irq() and |
@@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); | |||
110 | * @thread_fn: interrupt handler function for threaded interrupts | 101 | * @thread_fn: interrupt handler function for threaded interrupts |
111 | * @thread: thread pointer for threaded interrupts | 102 | * @thread: thread pointer for threaded interrupts |
112 | * @thread_flags: flags related to @thread | 103 | * @thread_flags: flags related to @thread |
104 | * @thread_mask: bitmask for keeping track of @thread activity | ||
113 | */ | 105 | */ |
114 | struct irqaction { | 106 | struct irqaction { |
115 | irq_handler_t handler; | 107 | irq_handler_t handler; |
@@ -120,6 +112,7 @@ struct irqaction { | |||
120 | irq_handler_t thread_fn; | 112 | irq_handler_t thread_fn; |
121 | struct task_struct *thread; | 113 | struct task_struct *thread; |
122 | unsigned long thread_flags; | 114 | unsigned long thread_flags; |
115 | unsigned long thread_mask; | ||
123 | const char *name; | 116 | const char *name; |
124 | struct proc_dir_entry *dir; | 117 | struct proc_dir_entry *dir; |
125 | } ____cacheline_internodealigned_in_smp; | 118 | } ____cacheline_internodealigned_in_smp; |
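The new @thread_mask field is internal bookkeeping for oneshot irq threads; drivers request threaded handlers exactly as before. A minimal sketch, with hypothetical foo_* names:

#include <linux/interrupt.h>

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        /* quick check in hard irq context, defer the real work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        /* runs in the irq thread and may sleep (i2c/spi access etc.) */
        return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *dev)
{
        /* IRQF_ONESHOT keeps the line masked until foo_thread_fn() returns */
        return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
                                    IRQF_ONESHOT, "foo", dev);
}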
@@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq); | |||
240 | extern int irq_select_affinity(unsigned int irq); | 233 | extern int irq_select_affinity(unsigned int irq); |
241 | 234 | ||
242 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); | 235 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); |
236 | |||
237 | /** | ||
238 | * struct irq_affinity_notify - context for notification of IRQ affinity changes | ||
239 | * @irq: Interrupt to which notification applies | ||
240 | * @kref: Reference count, for internal use | ||
241 | * @work: Work item, for internal use | ||
242 | * @notify: Function to be called on change. This will be | ||
243 | * called in process context. | ||
244 | * @release: Function to be called on release. This will be | ||
245 | * called in process context. Once registered, the | ||
246 | * structure must only be freed when this function is | ||
247 | * called or later. | ||
248 | */ | ||
249 | struct irq_affinity_notify { | ||
250 | unsigned int irq; | ||
251 | struct kref kref; | ||
252 | struct work_struct work; | ||
253 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); | ||
254 | void (*release)(struct kref *ref); | ||
255 | }; | ||
256 | |||
257 | extern int | ||
258 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | ||
259 | |||
260 | static inline void irq_run_affinity_notifiers(void) | ||
261 | { | ||
262 | flush_scheduled_work(); | ||
263 | } | ||
264 | |||
243 | #else /* CONFIG_SMP */ | 265 | #else /* CONFIG_SMP */ |
244 | 266 | ||
245 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) | 267 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
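A hedged sketch of registering such a notifier (the foo_* names are hypothetical; @irq, @kref and @work are managed by the core):

#include <linux/interrupt.h>

struct foo_ctx {
        struct irq_affinity_notify affinity_notify;
};

static void foo_affinity_notify(struct irq_affinity_notify *notify,
                                const cpumask_t *mask)
{
        /* process context: re-tune per-CPU resources to the new mask */
}

static void foo_affinity_release(struct kref *ref)
{
        /* last reference dropped; the structure may be freed from now on */
}

static int foo_register_notify(struct foo_ctx *ctx, unsigned int irq)
{
        ctx->affinity_notify.notify = foo_affinity_notify;
        ctx->affinity_notify.release = foo_affinity_release;
        return irq_set_affinity_notifier(irq, &ctx->affinity_notify);
}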
@@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq) | |||
255 | static inline int irq_select_affinity(unsigned int irq) { return 0; } | 277 | static inline int irq_select_affinity(unsigned int irq) { return 0; } |
256 | 278 | ||
257 | static inline int irq_set_affinity_hint(unsigned int irq, | 279 | static inline int irq_set_affinity_hint(unsigned int irq, |
258 | const struct cpumask *m) | 280 | const struct cpumask *m) |
259 | { | 281 | { |
260 | return -EINVAL; | 282 | return -EINVAL; |
261 | } | 283 | } |
@@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long | |||
314 | } | 336 | } |
315 | 337 | ||
316 | /* IRQ wakeup (PM) control: */ | 338 | /* IRQ wakeup (PM) control: */ |
317 | extern int set_irq_wake(unsigned int irq, unsigned int on); | 339 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); |
340 | |||
341 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
342 | /* Please do not use: Use the replacement functions instead */ | ||
343 | static inline int set_irq_wake(unsigned int irq, unsigned int on) | ||
344 | { | ||
345 | return irq_set_irq_wake(irq, on); | ||
346 | } | ||
347 | #endif | ||
318 | 348 | ||
319 | static inline int enable_irq_wake(unsigned int irq) | 349 | static inline int enable_irq_wake(unsigned int irq) |
320 | { | 350 | { |
321 | return set_irq_wake(irq, 1); | 351 | return irq_set_irq_wake(irq, 1); |
322 | } | 352 | } |
323 | 353 | ||
324 | static inline int disable_irq_wake(unsigned int irq) | 354 | static inline int disable_irq_wake(unsigned int irq) |
325 | { | 355 | { |
326 | return set_irq_wake(irq, 0); | 356 | return irq_set_irq_wake(irq, 0); |
327 | } | 357 | } |
328 | 358 | ||
329 | #else /* !CONFIG_GENERIC_HARDIRQS */ | 359 | #else /* !CONFIG_GENERIC_HARDIRQS */ |
@@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq) | |||
353 | } | 383 | } |
354 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 384 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
355 | 385 | ||
386 | |||
387 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
388 | extern bool force_irqthreads; | ||
389 | #else | ||
390 | #define force_irqthreads (0) | ||
391 | #endif | ||
392 | |||
356 | #ifndef __ARCH_SET_SOFTIRQ_PENDING | 393 | #ifndef __ARCH_SET_SOFTIRQ_PENDING |
357 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) | 394 | #define set_softirq_pending(x) (local_softirq_pending() = (x)) |
358 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) | 395 | #define or_softirq_pending(x) (local_softirq_pending() |= (x)) |
@@ -645,6 +682,7 @@ static inline void init_irq_proc(void) | |||
645 | 682 | ||
646 | struct seq_file; | 683 | struct seq_file; |
647 | int show_interrupts(struct seq_file *p, void *v); | 684 | int show_interrupts(struct seq_file *p, void *v); |
685 | int arch_show_interrupts(struct seq_file *p, int prec); | ||
648 | 686 | ||
649 | extern int early_irq_init(void); | 687 | extern int early_irq_init(void); |
650 | extern int arch_probe_nr_irqs(void); | 688 | extern int arch_probe_nr_irqs(void); |
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 80fcb53057bc..1d3577f30d45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -29,61 +29,104 @@ | |||
29 | #include <asm/irq_regs.h> | 29 | #include <asm/irq_regs.h> |
30 | 30 | ||
31 | struct irq_desc; | 31 | struct irq_desc; |
32 | struct irq_data; | ||
32 | typedef void (*irq_flow_handler_t)(unsigned int irq, | 33 | typedef void (*irq_flow_handler_t)(unsigned int irq, |
33 | struct irq_desc *desc); | 34 | struct irq_desc *desc); |
34 | 35 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | |
35 | 36 | ||
36 | /* | 37 | /* |
37 | * IRQ line status. | 38 | * IRQ line status. |
38 | * | 39 | * |
39 | * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h | 40 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h |
41 | * | ||
42 | * IRQ_TYPE_NONE - default, unspecified type | ||
43 | * IRQ_TYPE_EDGE_RISING - rising edge triggered | ||
44 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered | ||
45 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered | ||
46 | * IRQ_TYPE_LEVEL_HIGH - high level triggered | ||
47 | * IRQ_TYPE_LEVEL_LOW - low level triggered | ||
48 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits | ||
49 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits | ||
50 | * IRQ_TYPE_PROBE - Special flag for probing in progress | ||
51 | * | ||
52 | * Bits which can be modified via irq_set/clear/modify_status_flags() | ||
53 | * IRQ_LEVEL - Interrupt is level type. Will be also | ||
54 | * updated in the code when the above trigger | ||
55 | * bits are modified via set_irq_type() | ||
56 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | ||
57 | * it from affinity setting | ||
58 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | ||
59 | * IRQ_NOREQUEST - Interrupt cannot be requested via | ||
60 | * request_irq() | ||
61 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | ||
62 | * request/setup_irq() | ||
63 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | ||
64 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | ||
65 | * IRQ_NESTED_THREAD - Interrupt nests into another thread | ||
66 | * | ||
67 | * Deprecated bits. They are kept updated as long as | ||
68 | * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits | ||
69 | * are internal state of the core code and if you really need to access | ||
70 | * them then talk to the genirq maintainer instead of hacking | ||
71 | * something weird. | ||
40 | * | 72 | * |
41 | * IRQ types | ||
42 | */ | 73 | */ |
43 | #define IRQ_TYPE_NONE 0x00000000 /* Default, unspecified type */ | 74 | enum { |
44 | #define IRQ_TYPE_EDGE_RISING 0x00000001 /* Edge rising type */ | 75 | IRQ_TYPE_NONE = 0x00000000, |
45 | #define IRQ_TYPE_EDGE_FALLING 0x00000002 /* Edge falling type */ | 76 | IRQ_TYPE_EDGE_RISING = 0x00000001, |
46 | #define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) | 77 | IRQ_TYPE_EDGE_FALLING = 0x00000002, |
47 | #define IRQ_TYPE_LEVEL_HIGH 0x00000004 /* Level high type */ | 78 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), |
48 | #define IRQ_TYPE_LEVEL_LOW 0x00000008 /* Level low type */ | 79 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, |
49 | #define IRQ_TYPE_SENSE_MASK 0x0000000f /* Mask of the above */ | 80 | IRQ_TYPE_LEVEL_LOW = 0x00000008, |
50 | #define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */ | 81 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), |
51 | 82 | IRQ_TYPE_SENSE_MASK = 0x0000000f, | |
52 | /* Internal flags */ | 83 | |
53 | #define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */ | 84 | IRQ_TYPE_PROBE = 0x00000010, |
54 | #define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! */ | 85 | |
55 | #define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */ | 86 | IRQ_LEVEL = (1 << 8), |
56 | #define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */ | 87 | IRQ_PER_CPU = (1 << 9), |
57 | #define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */ | 88 | IRQ_NOPROBE = (1 << 10), |
58 | #define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */ | 89 | IRQ_NOREQUEST = (1 << 11), |
59 | #define IRQ_LEVEL 0x00004000 /* IRQ level triggered */ | 90 | IRQ_NOAUTOEN = (1 << 12), |
60 | #define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */ | 91 | IRQ_NO_BALANCING = (1 << 13), |
61 | #define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */ | 92 | IRQ_MOVE_PCNTXT = (1 << 14), |
62 | #define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */ | 93 | IRQ_NESTED_THREAD = (1 << 15), |
63 | #define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */ | 94 | |
64 | #define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */ | 95 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT |
65 | #define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */ | 96 | IRQ_INPROGRESS = (1 << 16), |
66 | #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ | 97 | IRQ_REPLAY = (1 << 17), |
67 | #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ | 98 | IRQ_WAITING = (1 << 18), |
68 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ | 99 | IRQ_DISABLED = (1 << 19), |
69 | #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ | 100 | IRQ_PENDING = (1 << 20), |
70 | #define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ | 101 | IRQ_MASKED = (1 << 21), |
71 | #define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ | 102 | IRQ_MOVE_PENDING = (1 << 22), |
72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ | 103 | IRQ_AFFINITY_SET = (1 << 23), |
73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ | 104 | IRQ_WAKEUP = (1 << 24), |
105 | #endif | ||
106 | }; | ||
74 | 107 | ||
75 | #define IRQF_MODIFY_MASK \ | 108 | #define IRQF_MODIFY_MASK \ |
76 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 109 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
77 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 110 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
78 | IRQ_PER_CPU) | 111 | IRQ_PER_CPU | IRQ_NESTED_THREAD) |
79 | 112 | ||
80 | #ifdef CONFIG_IRQ_PER_CPU | 113 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
81 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 114 | |
82 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 115 | static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status) |
83 | #else | 116 | { |
84 | # define CHECK_IRQ_PER_CPU(var) 0 | 117 | return status & IRQ_PER_CPU; |
85 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING | 118 | } |
86 | #endif | 119 | |
120 | /* | ||
121 | * Return value for chip->irq_set_affinity() | ||
122 | * | ||
123 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | ||
124 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity | ||
125 | */ | ||
126 | enum { | ||
127 | IRQ_SET_MASK_OK = 0, | ||
128 | IRQ_SET_MASK_OK_NOCOPY, | ||
129 | }; | ||
87 | 130 | ||
88 | struct msi_desc; | 131 | struct msi_desc; |
89 | 132 | ||
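To illustrate the contract (foo_hw_route_irq() is a hypothetical helper): an irq_chip callback that routes the interrupt to a single CPU and updates irq_data.affinity itself returns IRQ_SET_MASK_OK_NOCOPY so the core does not copy the requested mask over its result:

#include <linux/irq.h>

static int foo_irq_set_affinity(struct irq_data *data,
                                const struct cpumask *dest, bool force)
{
        unsigned int cpu = cpumask_first(dest);

        foo_hw_route_irq(data->irq, cpu);       /* hypothetical helper */

        /* the chip restricted the effective affinity to one CPU itself */
        cpumask_copy(data->affinity, cpumask_of(cpu));
        return IRQ_SET_MASK_OK_NOCOPY;
}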
@@ -91,6 +134,8 @@ struct msi_desc; | |||
91 | * struct irq_data - per irq and irq chip data passed down to chip functions | 134 | * struct irq_data - per irq and irq chip data passed down to chip functions |
92 | * @irq: interrupt number | 135 | * @irq: interrupt number |
93 | * @node: node index useful for balancing | 136 | * @node: node index useful for balancing |
137 | * @state_use_accessors: status information for irq chip functions. | ||
138 | * Use accessor functions to deal with it | ||
94 | * @chip: low level interrupt hardware access | 139 | * @chip: low level interrupt hardware access |
95 | * @handler_data: per-IRQ data for the irq_chip methods | 140 | * @handler_data: per-IRQ data for the irq_chip methods |
96 | * @chip_data: platform-specific per-chip private data for the chip | 141 | * @chip_data: platform-specific per-chip private data for the chip |
@@ -105,6 +150,7 @@ struct msi_desc; | |||
105 | struct irq_data { | 150 | struct irq_data { |
106 | unsigned int irq; | 151 | unsigned int irq; |
107 | unsigned int node; | 152 | unsigned int node; |
153 | unsigned int state_use_accessors; | ||
108 | struct irq_chip *chip; | 154 | struct irq_chip *chip; |
109 | void *handler_data; | 155 | void *handler_data; |
110 | void *chip_data; | 156 | void *chip_data; |
@@ -114,6 +160,80 @@ struct irq_data { | |||
114 | #endif | 160 | #endif |
115 | }; | 161 | }; |
116 | 162 | ||
163 | /* | ||
164 | * Bit masks for irq_data.state | ||
165 | * | ||
166 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits | ||
167 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending | ||
168 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ | ||
169 | * IRQD_PER_CPU - Interrupt is per cpu | ||
170 | * IRQD_AFFINITY_SET - Interrupt affinity was set | ||
171 | * IRQD_LEVEL - Interrupt is level triggered | ||
172 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup | ||
173 | * from suspend | ||
174 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process | ||
175 | * context | ||
176 | */ | ||
177 | enum { | ||
178 | IRQD_TRIGGER_MASK = 0xf, | ||
179 | IRQD_SETAFFINITY_PENDING = (1 << 8), | ||
180 | IRQD_NO_BALANCING = (1 << 10), | ||
181 | IRQD_PER_CPU = (1 << 11), | ||
182 | IRQD_AFFINITY_SET = (1 << 12), | ||
183 | IRQD_LEVEL = (1 << 13), | ||
184 | IRQD_WAKEUP_STATE = (1 << 14), | ||
185 | IRQD_MOVE_PCNTXT = (1 << 15), | ||
186 | }; | ||
187 | |||
188 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) | ||
189 | { | ||
190 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; | ||
191 | } | ||
192 | |||
193 | static inline bool irqd_is_per_cpu(struct irq_data *d) | ||
194 | { | ||
195 | return d->state_use_accessors & IRQD_PER_CPU; | ||
196 | } | ||
197 | |||
198 | static inline bool irqd_can_balance(struct irq_data *d) | ||
199 | { | ||
200 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); | ||
201 | } | ||
202 | |||
203 | static inline bool irqd_affinity_was_set(struct irq_data *d) | ||
204 | { | ||
205 | return d->state_use_accessors & IRQD_AFFINITY_SET; | ||
206 | } | ||
207 | |||
208 | static inline u32 irqd_get_trigger_type(struct irq_data *d) | ||
209 | { | ||
210 | return d->state_use_accessors & IRQD_TRIGGER_MASK; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Must only be called inside irq_chip.irq_set_type() functions. | ||
215 | */ | ||
216 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) | ||
217 | { | ||
218 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; | ||
219 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; | ||
220 | } | ||
221 | |||
222 | static inline bool irqd_is_level_type(struct irq_data *d) | ||
223 | { | ||
224 | return d->state_use_accessors & IRQD_LEVEL; | ||
225 | } | ||
226 | |||
227 | static inline bool irqd_is_wakeup_set(struct irq_data *d) | ||
228 | { | ||
229 | return d->state_use_accessors & IRQD_WAKEUP_STATE; | ||
230 | } | ||
231 | |||
232 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) | ||
233 | { | ||
234 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | ||
235 | } | ||
236 | |||
117 | /** | 237 | /** |
118 | * struct irq_chip - hardware interrupt chip descriptor | 238 | * struct irq_chip - hardware interrupt chip descriptor |
119 | * | 239 | * |
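A hedged sketch of chip callbacks consulting the cached state through these accessors instead of peeking at desc->status (the foo_hw_* helpers are hypothetical):

#include <linux/irq.h>

static void foo_irq_unmask(struct irq_data *d)
{
        /* level-triggered lines on this chip want an ack before unmask */
        if (irqd_is_level_type(d))
                foo_hw_ack(d->irq);
        foo_hw_unmask(d->irq);
}

static int foo_irq_retrigger(struct irq_data *d)
{
        /* only edge interrupts can be replayed in hardware here */
        if (!(irqd_get_trigger_type(d) & IRQ_TYPE_EDGE_BOTH))
                return 0;
        foo_hw_retrigger(d->irq);
        return 1;
}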
@@ -150,6 +270,7 @@ struct irq_data { | |||
150 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | 270 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
151 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | 271 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
152 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 272 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
273 | * @flags: chip specific flags | ||
153 | * | 274 | * |
154 | * @release: release function solely used by UML | 275 | * @release: release function solely used by UML |
155 | */ | 276 | */ |
@@ -196,12 +317,27 @@ struct irq_chip { | |||
196 | void (*irq_bus_lock)(struct irq_data *data); | 317 | void (*irq_bus_lock)(struct irq_data *data); |
197 | void (*irq_bus_sync_unlock)(struct irq_data *data); | 318 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
198 | 319 | ||
320 | unsigned long flags; | ||
321 | |||
199 | /* Currently used only by UML, might disappear one day.*/ | 322 | /* Currently used only by UML, might disappear one day.*/ |
200 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 323 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
201 | void (*release)(unsigned int irq, void *dev_id); | 324 | void (*release)(unsigned int irq, void *dev_id); |
202 | #endif | 325 | #endif |
203 | }; | 326 | }; |
204 | 327 | ||
328 | /* | ||
329 | * irq_chip specific flags | ||
330 | * | ||
331 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() | ||
332 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled | ||
333 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path | ||
334 | */ | ||
335 | enum { | ||
336 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | ||
337 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), | ||
338 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), | ||
339 | }; | ||
340 | |||
205 | /* This include will go away once we isolated irq_desc usage to core code */ | 341 | /* This include will go away once we isolated irq_desc usage to core code */ |
206 | #include <linux/irqdesc.h> | 342 | #include <linux/irqdesc.h> |
207 | 343 | ||
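For illustration (hypothetical chip, callbacks elided), a chip opting into the new behaviour flags could be declared along these lines:

#include <linux/irq.h>

static struct irq_chip foo_chip = {
        .name           = "FOO",
        .irq_ack        = foo_irq_ack,          /* hypothetical callbacks */
        .irq_mask       = foo_irq_mask,
        .irq_unmask     = foo_irq_unmask,
        .irq_set_type   = foo_irq_set_type,
        /* mask across irq_set_type(), mask non-wake irqs on suspend */
        .flags          = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND,
};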
@@ -218,7 +354,7 @@ struct irq_chip { | |||
218 | # define ARCH_IRQ_INIT_FLAGS 0 | 354 | # define ARCH_IRQ_INIT_FLAGS 0 |
219 | #endif | 355 | #endif |
220 | 356 | ||
221 | #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) | 357 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS |
222 | 358 | ||
223 | struct irqaction; | 359 | struct irqaction; |
224 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 360 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
@@ -229,9 +365,13 @@ extern void remove_irq(unsigned int irq, struct irqaction *act); | |||
229 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 365 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
230 | void move_native_irq(int irq); | 366 | void move_native_irq(int irq); |
231 | void move_masked_irq(int irq); | 367 | void move_masked_irq(int irq); |
368 | void irq_move_irq(struct irq_data *data); | ||
369 | void irq_move_masked_irq(struct irq_data *data); | ||
232 | #else | 370 | #else |
233 | static inline void move_native_irq(int irq) { } | 371 | static inline void move_native_irq(int irq) { } |
234 | static inline void move_masked_irq(int irq) { } | 372 | static inline void move_masked_irq(int irq) { } |
373 | static inline void irq_move_irq(struct irq_data *data) { } | ||
374 | static inline void irq_move_masked_irq(struct irq_data *data) { } | ||
235 | #endif | 375 | #endif |
236 | 376 | ||
237 | extern int no_irq_affinity; | 377 | extern int no_irq_affinity; |
@@ -267,23 +407,23 @@ extern struct irq_chip no_irq_chip; | |||
267 | extern struct irq_chip dummy_irq_chip; | 407 | extern struct irq_chip dummy_irq_chip; |
268 | 408 | ||
269 | extern void | 409 | extern void |
270 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | 410 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
271 | irq_flow_handler_t handle); | ||
272 | extern void | ||
273 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | ||
274 | irq_flow_handler_t handle, const char *name); | 411 | irq_flow_handler_t handle, const char *name); |
275 | 412 | ||
413 | static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
414 | irq_flow_handler_t handle) | ||
415 | { | ||
416 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); | ||
417 | } | ||
418 | |||
276 | extern void | 419 | extern void |
277 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 420 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
278 | const char *name); | 421 | const char *name); |
279 | 422 | ||
280 | /* | ||
281 | * Set a highlevel flow handler for a given IRQ: | ||
282 | */ | ||
283 | static inline void | 423 | static inline void |
284 | set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | 424 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) |
285 | { | 425 | { |
286 | __set_irq_handler(irq, handle, 0, NULL); | 426 | __irq_set_handler(irq, handle, 0, NULL); |
287 | } | 427 | } |
288 | 428 | ||
289 | /* | 429 | /* |
@@ -292,14 +432,11 @@ set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | |||
292 | * IRQ_NOREQUEST and IRQ_NOPROBE) | 432 | * IRQ_NOREQUEST and IRQ_NOPROBE) |
293 | */ | 433 | */ |
294 | static inline void | 434 | static inline void |
295 | set_irq_chained_handler(unsigned int irq, | 435 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
296 | irq_flow_handler_t handle) | ||
297 | { | 436 | { |
298 | __set_irq_handler(irq, handle, 1, NULL); | 437 | __irq_set_handler(irq, handle, 1, NULL); |
299 | } | 438 | } |
300 | 439 | ||
301 | extern void set_irq_nested_thread(unsigned int irq, int nest); | ||
302 | |||
303 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); | 440 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
304 | 441 | ||
305 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | 442 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) |
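Callers convert mechanically to the new names; a hypothetical platform init might now read:

#include <linux/irq.h>

static void __init foo_init_irqs(void)
{
        unsigned int irq;

        for (irq = FOO_IRQ_BASE; irq < FOO_IRQ_BASE + FOO_NR_IRQS; irq++) {
                irq_set_chip_and_handler_name(irq, &foo_chip,
                                              handle_level_irq, "level");
                irq_set_chip_data(irq, &foo_priv);      /* hypothetical per-chip data */
        }
}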
@@ -312,16 +449,24 @@ static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | |||
312 | irq_modify_status(irq, clr, 0); | 449 | irq_modify_status(irq, clr, 0); |
313 | } | 450 | } |
314 | 451 | ||
315 | static inline void set_irq_noprobe(unsigned int irq) | 452 | static inline void irq_set_noprobe(unsigned int irq) |
316 | { | 453 | { |
317 | irq_modify_status(irq, 0, IRQ_NOPROBE); | 454 | irq_modify_status(irq, 0, IRQ_NOPROBE); |
318 | } | 455 | } |
319 | 456 | ||
320 | static inline void set_irq_probe(unsigned int irq) | 457 | static inline void irq_set_probe(unsigned int irq) |
321 | { | 458 | { |
322 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 459 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
323 | } | 460 | } |
324 | 461 | ||
462 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | ||
463 | { | ||
464 | if (nest) | ||
465 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); | ||
466 | else | ||
467 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); | ||
468 | } | ||
469 | |||
325 | /* Handle dynamic irq creation and destruction */ | 470 | /* Handle dynamic irq creation and destruction */ |
326 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 471 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
327 | extern int create_irq(void); | 472 | extern int create_irq(void); |
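irq_set_nested_thread() is the replacement for set_irq_nested_thread(); a hypothetical I2C expander whose demux runs from the parent interrupt's thread would mark its sub-interrupts like this:

#include <linux/irq.h>

static void foo_expander_setup_irqs(unsigned int base, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                irq_set_chip_and_handler(base + i, &foo_expander_chip,
                                         handle_simple_irq);
                /* dispatched via handle_nested_irq() from the parent's irq thread */
                irq_set_nested_thread(base + i, true);
                irq_set_noprobe(base + i);
        }
}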
@@ -338,14 +483,14 @@ static inline void dynamic_irq_init(unsigned int irq) | |||
338 | } | 483 | } |
339 | 484 | ||
340 | /* Set/get chip/data for an IRQ: */ | 485 | /* Set/get chip/data for an IRQ: */ |
341 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); | 486 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); |
342 | extern int set_irq_data(unsigned int irq, void *data); | 487 | extern int irq_set_handler_data(unsigned int irq, void *data); |
343 | extern int set_irq_chip_data(unsigned int irq, void *data); | 488 | extern int irq_set_chip_data(unsigned int irq, void *data); |
344 | extern int set_irq_type(unsigned int irq, unsigned int type); | 489 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); |
345 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | 490 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); |
346 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | 491 | extern struct irq_data *irq_get_irq_data(unsigned int irq); |
347 | 492 | ||
348 | static inline struct irq_chip *get_irq_chip(unsigned int irq) | 493 | static inline struct irq_chip *irq_get_chip(unsigned int irq) |
349 | { | 494 | { |
350 | struct irq_data *d = irq_get_irq_data(irq); | 495 | struct irq_data *d = irq_get_irq_data(irq); |
351 | return d ? d->chip : NULL; | 496 | return d ? d->chip : NULL; |
@@ -356,7 +501,7 @@ static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) | |||
356 | return d->chip; | 501 | return d->chip; |
357 | } | 502 | } |
358 | 503 | ||
359 | static inline void *get_irq_chip_data(unsigned int irq) | 504 | static inline void *irq_get_chip_data(unsigned int irq) |
360 | { | 505 | { |
361 | struct irq_data *d = irq_get_irq_data(irq); | 506 | struct irq_data *d = irq_get_irq_data(irq); |
362 | return d ? d->chip_data : NULL; | 507 | return d ? d->chip_data : NULL; |
@@ -367,18 +512,18 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) | |||
367 | return d->chip_data; | 512 | return d->chip_data; |
368 | } | 513 | } |
369 | 514 | ||
370 | static inline void *get_irq_data(unsigned int irq) | 515 | static inline void *irq_get_handler_data(unsigned int irq) |
371 | { | 516 | { |
372 | struct irq_data *d = irq_get_irq_data(irq); | 517 | struct irq_data *d = irq_get_irq_data(irq); |
373 | return d ? d->handler_data : NULL; | 518 | return d ? d->handler_data : NULL; |
374 | } | 519 | } |
375 | 520 | ||
376 | static inline void *irq_data_get_irq_data(struct irq_data *d) | 521 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
377 | { | 522 | { |
378 | return d->handler_data; | 523 | return d->handler_data; |
379 | } | 524 | } |
380 | 525 | ||
381 | static inline struct msi_desc *get_irq_msi(unsigned int irq) | 526 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
382 | { | 527 | { |
383 | struct irq_data *d = irq_get_irq_data(irq); | 528 | struct irq_data *d = irq_get_irq_data(irq); |
384 | return d ? d->msi_desc : NULL; | 529 | return d ? d->msi_desc : NULL; |
@@ -389,6 +534,89 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) | |||
389 | return d->msi_desc; | 534 | return d->msi_desc; |
390 | } | 535 | } |
391 | 536 | ||
537 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
538 | /* Please do not use: Use the replacement functions instead */ | ||
539 | static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip) | ||
540 | { | ||
541 | return irq_set_chip(irq, chip); | ||
542 | } | ||
543 | static inline int set_irq_data(unsigned int irq, void *data) | ||
544 | { | ||
545 | return irq_set_handler_data(irq, data); | ||
546 | } | ||
547 | static inline int set_irq_chip_data(unsigned int irq, void *data) | ||
548 | { | ||
549 | return irq_set_chip_data(irq, data); | ||
550 | } | ||
551 | static inline int set_irq_type(unsigned int irq, unsigned int type) | ||
552 | { | ||
553 | return irq_set_irq_type(irq, type); | ||
554 | } | ||
555 | static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry) | ||
556 | { | ||
557 | return irq_set_msi_desc(irq, entry); | ||
558 | } | ||
559 | static inline struct irq_chip *get_irq_chip(unsigned int irq) | ||
560 | { | ||
561 | return irq_get_chip(irq); | ||
562 | } | ||
563 | static inline void *get_irq_chip_data(unsigned int irq) | ||
564 | { | ||
565 | return irq_get_chip_data(irq); | ||
566 | } | ||
567 | static inline void *get_irq_data(unsigned int irq) | ||
568 | { | ||
569 | return irq_get_handler_data(irq); | ||
570 | } | ||
571 | static inline void *irq_data_get_irq_data(struct irq_data *d) | ||
572 | { | ||
573 | return irq_data_get_irq_handler_data(d); | ||
574 | } | ||
575 | static inline struct msi_desc *get_irq_msi(unsigned int irq) | ||
576 | { | ||
577 | return irq_get_msi_desc(irq); | ||
578 | } | ||
579 | static inline void set_irq_noprobe(unsigned int irq) | ||
580 | { | ||
581 | irq_set_noprobe(irq); | ||
582 | } | ||
583 | static inline void set_irq_probe(unsigned int irq) | ||
584 | { | ||
585 | irq_set_probe(irq); | ||
586 | } | ||
587 | static inline void set_irq_nested_thread(unsigned int irq, int nest) | ||
588 | { | ||
589 | irq_set_nested_thread(irq, nest); | ||
590 | } | ||
591 | static inline void | ||
592 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | ||
593 | irq_flow_handler_t handle, const char *name) | ||
594 | { | ||
595 | irq_set_chip_and_handler_name(irq, chip, handle, name); | ||
596 | } | ||
597 | static inline void | ||
598 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
599 | irq_flow_handler_t handle) | ||
600 | { | ||
601 | irq_set_chip_and_handler(irq, chip, handle); | ||
602 | } | ||
603 | static inline void | ||
604 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | ||
605 | const char *name) | ||
606 | { | ||
607 | __irq_set_handler(irq, handle, is_chained, name); | ||
608 | } | ||
609 | static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle) | ||
610 | { | ||
611 | irq_set_handler(irq, handle); | ||
612 | } | ||
613 | static inline void | ||
614 | set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle) | ||
615 | { | ||
616 | irq_set_chained_handler(irq, handle); | ||
617 | } | ||
618 | #endif | ||
619 | |||
392 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); | 620 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); |
393 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 621 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
394 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 622 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c1a95b7b58de..00218371518b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@ | |||
8 | * For now it's included from <linux/irq.h> | 8 | * For now it's included from <linux/irq.h> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | struct irq_affinity_notify; | ||
11 | struct proc_dir_entry; | 12 | struct proc_dir_entry; |
12 | struct timer_rand_state; | 13 | struct timer_rand_state; |
13 | /** | 14 | /** |
@@ -18,13 +19,16 @@ struct timer_rand_state; | |||
18 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | 19 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] |
19 | * @action: the irq action chain | 20 | * @action: the irq action chain |
20 | * @status: status information | 21 | * @status: status information |
22 | * @core_internal_state__do_not_mess_with_it: core internal status information | ||
21 | * @depth: disable-depth, for nested irq_disable() calls | 23 | * @depth: disable-depth, for nested irq_disable() calls |
22 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | 24 | * @wake_depth: enable depth, for multiple set_irq_wake() callers |
23 | * @irq_count: stats field to detect stalled irqs | 25 | * @irq_count: stats field to detect stalled irqs |
24 | * @last_unhandled: aging timer for unhandled count | 26 | * @last_unhandled: aging timer for unhandled count |
25 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 27 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
26 | * @lock: locking for SMP | 28 | * @lock: locking for SMP |
29 | * @affinity_notify: context for notification of affinity changes | ||
27 | * @pending_mask: pending rebalanced interrupts | 30 | * @pending_mask: pending rebalanced interrupts |
31 | * @threads_oneshot: bitfield to handle shared oneshot threads | ||
28 | * @threads_active: number of irqaction threads currently running | 32 | * @threads_active: number of irqaction threads currently running |
29 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | 33 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers |
30 | * @dir: /proc/irq/ procfs entry | 34 | * @dir: /proc/irq/ procfs entry |
@@ -45,6 +49,7 @@ struct irq_desc { | |||
45 | struct { | 49 | struct { |
46 | unsigned int irq; | 50 | unsigned int irq; |
47 | unsigned int node; | 51 | unsigned int node; |
52 | unsigned int pad_do_not_even_think_about_it; | ||
48 | struct irq_chip *chip; | 53 | struct irq_chip *chip; |
49 | void *handler_data; | 54 | void *handler_data; |
50 | void *chip_data; | 55 | void *chip_data; |
@@ -59,9 +64,16 @@ struct irq_desc { | |||
59 | struct timer_rand_state *timer_rand_state; | 64 | struct timer_rand_state *timer_rand_state; |
60 | unsigned int __percpu *kstat_irqs; | 65 | unsigned int __percpu *kstat_irqs; |
61 | irq_flow_handler_t handle_irq; | 66 | irq_flow_handler_t handle_irq; |
67 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | ||
68 | irq_preflow_handler_t preflow_handler; | ||
69 | #endif | ||
62 | struct irqaction *action; /* IRQ action list */ | 70 | struct irqaction *action; /* IRQ action list */ |
71 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
72 | unsigned int status_use_accessors; | ||
73 | #else | ||
63 | unsigned int status; /* IRQ status */ | 74 | unsigned int status; /* IRQ status */ |
64 | 75 | #endif | |
76 | unsigned int core_internal_state__do_not_mess_with_it; | ||
65 | unsigned int depth; /* nested irq disables */ | 77 | unsigned int depth; /* nested irq disables */ |
66 | unsigned int wake_depth; /* nested wake enables */ | 78 | unsigned int wake_depth; /* nested wake enables */ |
67 | unsigned int irq_count; /* For detecting broken IRQs */ | 79 | unsigned int irq_count; /* For detecting broken IRQs */ |
@@ -70,10 +82,12 @@ struct irq_desc { | |||
70 | raw_spinlock_t lock; | 82 | raw_spinlock_t lock; |
71 | #ifdef CONFIG_SMP | 83 | #ifdef CONFIG_SMP |
72 | const struct cpumask *affinity_hint; | 84 | const struct cpumask *affinity_hint; |
85 | struct irq_affinity_notify *affinity_notify; | ||
73 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 86 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
74 | cpumask_var_t pending_mask; | 87 | cpumask_var_t pending_mask; |
75 | #endif | 88 | #endif |
76 | #endif | 89 | #endif |
90 | unsigned long threads_oneshot; | ||
77 | atomic_t threads_active; | 91 | atomic_t threads_active; |
78 | wait_queue_head_t wait_for_threads; | 92 | wait_queue_head_t wait_for_threads; |
79 | #ifdef CONFIG_PROC_FS | 93 | #ifdef CONFIG_PROC_FS |
@@ -95,10 +109,51 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | |||
95 | 109 | ||
96 | #ifdef CONFIG_GENERIC_HARDIRQS | 110 | #ifdef CONFIG_GENERIC_HARDIRQS |
97 | 111 | ||
98 | #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) | 112 | static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) |
99 | #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) | 113 | { |
100 | #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) | 114 | return &desc->irq_data; |
101 | #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) | 115 | } |
116 | |||
117 | static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) | ||
118 | { | ||
119 | return desc->irq_data.chip; | ||
120 | } | ||
121 | |||
122 | static inline void *irq_desc_get_chip_data(struct irq_desc *desc) | ||
123 | { | ||
124 | return desc->irq_data.chip_data; | ||
125 | } | ||
126 | |||
127 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | ||
128 | { | ||
129 | return desc->irq_data.handler_data; | ||
130 | } | ||
131 | |||
132 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | ||
133 | { | ||
134 | return desc->irq_data.msi_desc; | ||
135 | } | ||
136 | |||
137 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
138 | static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc) | ||
139 | { | ||
140 | return irq_desc_get_chip(desc); | ||
141 | } | ||
142 | static inline void *get_irq_desc_data(struct irq_desc *desc) | ||
143 | { | ||
144 | return irq_desc_get_handler_data(desc); | ||
145 | } | ||
146 | |||
147 | static inline void *get_irq_desc_chip_data(struct irq_desc *desc) | ||
148 | { | ||
149 | return irq_desc_get_chip_data(desc); | ||
150 | } | ||
151 | |||
152 | static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc) | ||
153 | { | ||
154 | return irq_desc_get_msi_desc(desc); | ||
155 | } | ||
156 | #endif | ||
102 | 157 | ||
103 | /* | 158 | /* |
104 | * Architectures call this to let the generic IRQ layer | 159 | * Architectures call this to let the generic IRQ layer |
@@ -123,6 +178,7 @@ static inline int irq_has_action(unsigned int irq) | |||
123 | return desc->action != NULL; | 178 | return desc->action != NULL; |
124 | } | 179 | } |
125 | 180 | ||
181 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
126 | static inline int irq_balancing_disabled(unsigned int irq) | 182 | static inline int irq_balancing_disabled(unsigned int irq) |
127 | { | 183 | { |
128 | struct irq_desc *desc; | 184 | struct irq_desc *desc; |
@@ -130,6 +186,7 @@ static inline int irq_balancing_disabled(unsigned int irq) | |||
130 | desc = irq_to_desc(irq); | 186 | desc = irq_to_desc(irq); |
131 | return desc->status & IRQ_NO_BALANCING_MASK; | 187 | return desc->status & IRQ_NO_BALANCING_MASK; |
132 | } | 188 | } |
189 | #endif | ||
133 | 190 | ||
134 | /* caller has locked the irq_desc and both params are valid */ | 191 | /* caller has locked the irq_desc and both params are valid */ |
135 | static inline void __set_irq_handler_unlocked(int irq, | 192 | static inline void __set_irq_handler_unlocked(int irq, |
@@ -140,6 +197,17 @@ static inline void __set_irq_handler_unlocked(int irq, | |||
140 | desc = irq_to_desc(irq); | 197 | desc = irq_to_desc(irq); |
141 | desc->handle_irq = handler; | 198 | desc->handle_irq = handler; |
142 | } | 199 | } |
200 | |||
201 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | ||
202 | static inline void | ||
203 | __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) | ||
204 | { | ||
205 | struct irq_desc *desc; | ||
206 | |||
207 | desc = irq_to_desc(irq); | ||
208 | desc->preflow_handler = handler; | ||
209 | } | ||
210 | #endif | ||
143 | #endif | 211 | #endif |
144 | 212 | ||
145 | #endif | 213 | #endif |
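A hedged sketch of a chained demux flow handler built on the irq_desc_get_*() accessors introduced above (struct foo_priv and foo_read_pending() are hypothetical):

#include <linux/irq.h>
#include <linux/bitops.h>

struct foo_priv {
        unsigned int irq_base;
};

static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct foo_priv *priv = irq_desc_get_handler_data(desc);
        unsigned long pending = foo_read_pending(priv);
        int bit;

        chip->irq_mask(&desc->irq_data);
        for_each_set_bit(bit, &pending, BITS_PER_LONG)
                generic_handle_irq(priv->irq_base + bit);
        chip->irq_unmask(&desc->irq_data);
}

/* installed on the parent line with irq_set_chained_handler(parent_irq, foo_demux_handler) */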
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 8e42fec7686d..09bef82d74cb 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -1,5 +1,6 @@ | |||
1 | # Select this to activate the generic irq options below | ||
1 | config HAVE_GENERIC_HARDIRQS | 2 | config HAVE_GENERIC_HARDIRQS |
2 | def_bool n | 3 | bool |
3 | 4 | ||
4 | if HAVE_GENERIC_HARDIRQS | 5 | if HAVE_GENERIC_HARDIRQS |
5 | menu "IRQ subsystem" | 6 | menu "IRQ subsystem" |
@@ -11,26 +12,44 @@ config GENERIC_HARDIRQS | |||
11 | 12 | ||
12 | # Select this to disable the deprecated stuff | 13 | # Select this to disable the deprecated stuff |
13 | config GENERIC_HARDIRQS_NO_DEPRECATED | 14 | config GENERIC_HARDIRQS_NO_DEPRECATED |
14 | def_bool n | 15 | bool |
16 | |||
17 | config GENERIC_HARDIRQS_NO_COMPAT | ||
18 | bool | ||
15 | 19 | ||
16 | # Options selectable by the architecture code | 20 | # Options selectable by the architecture code |
21 | |||
22 | # Make sparse irq Kconfig switch below available | ||
17 | config HAVE_SPARSE_IRQ | 23 | config HAVE_SPARSE_IRQ |
18 | def_bool n | 24 | bool |
19 | 25 | ||
26 | # Enable the generic irq autoprobe mechanism | ||
20 | config GENERIC_IRQ_PROBE | 27 | config GENERIC_IRQ_PROBE |
21 | def_bool n | 28 | bool |
29 | |||
30 | # Use the generic /proc/interrupts implementation | ||
31 | config GENERIC_IRQ_SHOW | ||
32 | bool | ||
22 | 33 | ||
34 | # Support for delayed migration from interrupt context | ||
23 | config GENERIC_PENDING_IRQ | 35 | config GENERIC_PENDING_IRQ |
24 | def_bool n | 36 | bool |
25 | 37 | ||
38 | # Alpha specific irq affinity mechanism | ||
26 | config AUTO_IRQ_AFFINITY | 39 | config AUTO_IRQ_AFFINITY |
27 | def_bool n | 40 | bool |
28 | |||
29 | config IRQ_PER_CPU | ||
30 | def_bool n | ||
31 | 41 | ||
42 | # Tasklet based software resend for pending interrupts on enable_irq() | ||
32 | config HARDIRQS_SW_RESEND | 43 | config HARDIRQS_SW_RESEND |
33 | def_bool n | 44 | bool |
45 | |||
46 | # Preflow handler support for fasteoi (sparc64) | ||
47 | config IRQ_PREFLOW_FASTEOI | ||
48 | bool | ||
49 | |||
50 | # Support forced irq threading | ||
51 | config IRQ_FORCED_THREADING | ||
52 | bool | ||
34 | 53 | ||
35 | config SPARSE_IRQ | 54 | config SPARSE_IRQ |
36 | bool "Support sparse irq numbering" | 55 | bool "Support sparse irq numbering" |
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 505798f86c36..394784c57060 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -17,7 +17,7 @@ | |||
17 | /* | 17 | /* |
18 | * Autodetection depends on the fact that any interrupt that | 18 | * Autodetection depends on the fact that any interrupt that |
19 | * comes in on to an unassigned handler will get stuck with | 19 | * comes in on to an unassigned handler will get stuck with |
20 | * "IRQ_WAITING" cleared and the interrupt disabled. | 20 | * "IRQS_WAITING" cleared and the interrupt disabled. |
21 | */ | 21 | */ |
22 | static DEFINE_MUTEX(probing_active); | 22 | static DEFINE_MUTEX(probing_active); |
23 | 23 | ||
@@ -32,7 +32,6 @@ unsigned long probe_irq_on(void) | |||
32 | { | 32 | { |
33 | struct irq_desc *desc; | 33 | struct irq_desc *desc; |
34 | unsigned long mask = 0; | 34 | unsigned long mask = 0; |
35 | unsigned int status; | ||
36 | int i; | 35 | int i; |
37 | 36 | ||
38 | /* | 37 | /* |
@@ -46,13 +45,7 @@ unsigned long probe_irq_on(void) | |||
46 | */ | 45 | */ |
47 | for_each_irq_desc_reverse(i, desc) { | 46 | for_each_irq_desc_reverse(i, desc) { |
48 | raw_spin_lock_irq(&desc->lock); | 47 | raw_spin_lock_irq(&desc->lock); |
49 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 48 | if (!desc->action && irq_settings_can_probe(desc)) { |
50 | /* | ||
51 | * An old-style architecture might still have | ||
52 | * the handle_bad_irq handler there: | ||
53 | */ | ||
54 | compat_irq_chip_set_default_handler(desc); | ||
55 | |||
56 | /* | 49 | /* |
57 | * Some chips need to know about probing in | 50 | * Some chips need to know about probing in |
58 | * progress: | 51 | * progress: |
@@ -60,7 +53,7 @@ unsigned long probe_irq_on(void) | |||
60 | if (desc->irq_data.chip->irq_set_type) | 53 | if (desc->irq_data.chip->irq_set_type) |
61 | desc->irq_data.chip->irq_set_type(&desc->irq_data, | 54 | desc->irq_data.chip->irq_set_type(&desc->irq_data, |
62 | IRQ_TYPE_PROBE); | 55 | IRQ_TYPE_PROBE); |
63 | desc->irq_data.chip->irq_startup(&desc->irq_data); | 56 | irq_startup(desc); |
64 | } | 57 | } |
65 | raw_spin_unlock_irq(&desc->lock); | 58 | raw_spin_unlock_irq(&desc->lock); |
66 | } | 59 | } |
@@ -75,10 +68,12 @@ unsigned long probe_irq_on(void) | |||
75 | */ | 68 | */ |
76 | for_each_irq_desc_reverse(i, desc) { | 69 | for_each_irq_desc_reverse(i, desc) { |
77 | raw_spin_lock_irq(&desc->lock); | 70 | raw_spin_lock_irq(&desc->lock); |
78 | if (!desc->action && !(desc->status & IRQ_NOPROBE)) { | 71 | if (!desc->action && irq_settings_can_probe(desc)) { |
79 | desc->status |= IRQ_AUTODETECT | IRQ_WAITING; | 72 | desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; |
80 | if (desc->irq_data.chip->irq_startup(&desc->irq_data)) | 73 | if (irq_startup(desc)) { |
81 | desc->status |= IRQ_PENDING; | 74 | irq_compat_set_pending(desc); |
75 | desc->istate |= IRQS_PENDING; | ||
76 | } | ||
82 | } | 77 | } |
83 | raw_spin_unlock_irq(&desc->lock); | 78 | raw_spin_unlock_irq(&desc->lock); |
84 | } | 79 | } |
@@ -93,13 +88,12 @@ unsigned long probe_irq_on(void) | |||
93 | */ | 88 | */ |
94 | for_each_irq_desc(i, desc) { | 89 | for_each_irq_desc(i, desc) { |
95 | raw_spin_lock_irq(&desc->lock); | 90 | raw_spin_lock_irq(&desc->lock); |
96 | status = desc->status; | ||
97 | 91 | ||
98 | if (status & IRQ_AUTODETECT) { | 92 | if (desc->istate & IRQS_AUTODETECT) { |
99 | /* It triggered already - consider it spurious. */ | 93 | /* It triggered already - consider it spurious. */ |
100 | if (!(status & IRQ_WAITING)) { | 94 | if (!(desc->istate & IRQS_WAITING)) { |
101 | desc->status = status & ~IRQ_AUTODETECT; | 95 | desc->istate &= ~IRQS_AUTODETECT; |
102 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | 96 | irq_shutdown(desc); |
103 | } else | 97 | } else |
104 | if (i < 32) | 98 | if (i < 32) |
105 | mask |= 1 << i; | 99 | mask |= 1 << i; |
@@ -125,20 +119,18 @@ EXPORT_SYMBOL(probe_irq_on); | |||
125 | */ | 119 | */ |
126 | unsigned int probe_irq_mask(unsigned long val) | 120 | unsigned int probe_irq_mask(unsigned long val) |
127 | { | 121 | { |
128 | unsigned int status, mask = 0; | 122 | unsigned int mask = 0; |
129 | struct irq_desc *desc; | 123 | struct irq_desc *desc; |
130 | int i; | 124 | int i; |
131 | 125 | ||
132 | for_each_irq_desc(i, desc) { | 126 | for_each_irq_desc(i, desc) { |
133 | raw_spin_lock_irq(&desc->lock); | 127 | raw_spin_lock_irq(&desc->lock); |
134 | status = desc->status; | 128 | if (desc->istate & IRQS_AUTODETECT) { |
135 | 129 | if (i < 16 && !(desc->istate & IRQS_WAITING)) | |
136 | if (status & IRQ_AUTODETECT) { | ||
137 | if (i < 16 && !(status & IRQ_WAITING)) | ||
138 | mask |= 1 << i; | 130 | mask |= 1 << i; |
139 | 131 | ||
140 | desc->status = status & ~IRQ_AUTODETECT; | 132 | desc->istate &= ~IRQS_AUTODETECT; |
141 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | 133 | irq_shutdown(desc); |
142 | } | 134 | } |
143 | raw_spin_unlock_irq(&desc->lock); | 135 | raw_spin_unlock_irq(&desc->lock); |
144 | } | 136 | } |
@@ -169,20 +161,18 @@ int probe_irq_off(unsigned long val) | |||
169 | { | 161 | { |
170 | int i, irq_found = 0, nr_of_irqs = 0; | 162 | int i, irq_found = 0, nr_of_irqs = 0; |
171 | struct irq_desc *desc; | 163 | struct irq_desc *desc; |
172 | unsigned int status; | ||
173 | 164 | ||
174 | for_each_irq_desc(i, desc) { | 165 | for_each_irq_desc(i, desc) { |
175 | raw_spin_lock_irq(&desc->lock); | 166 | raw_spin_lock_irq(&desc->lock); |
176 | status = desc->status; | ||
177 | 167 | ||
178 | if (status & IRQ_AUTODETECT) { | 168 | if (desc->istate & IRQS_AUTODETECT) { |
179 | if (!(status & IRQ_WAITING)) { | 169 | if (!(desc->istate & IRQS_WAITING)) { |
180 | if (!nr_of_irqs) | 170 | if (!nr_of_irqs) |
181 | irq_found = i; | 171 | irq_found = i; |
182 | nr_of_irqs++; | 172 | nr_of_irqs++; |
183 | } | 173 | } |
184 | desc->status = status & ~IRQ_AUTODETECT; | 174 | desc->istate &= ~IRQS_AUTODETECT; |
185 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | 175 | irq_shutdown(desc); |
186 | } | 176 | } |
187 | raw_spin_unlock_irq(&desc->lock); | 177 | raw_spin_unlock_irq(&desc->lock); |
188 | } | 178 | } |
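The driver-visible autoprobe API itself is unchanged by this conversion; the classic probing sequence still applies (foo_hw_trigger_test_irq() is a hypothetical helper):

#include <linux/interrupt.h>
#include <linux/delay.h>

static int foo_probe_irq(void)
{
        unsigned long mask;
        int irq;

        mask = probe_irq_on();
        foo_hw_trigger_test_irq();      /* make the device raise its interrupt */
        udelay(100);
        irq = probe_irq_off(mask);      /* 0: none seen, negative: several seen */

        return irq > 0 ? irq : -ENODEV;
}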
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index baa5c4acad83..c9c0601f0615 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -19,140 +19,110 @@ | |||
19 | #include "internals.h" | 19 | #include "internals.h" |
20 | 20 | ||
21 | /** | 21 | /** |
22 | * set_irq_chip - set the irq chip for an irq | 22 | * irq_set_chip - set the irq chip for an irq |
23 | * @irq: irq number | 23 | * @irq: irq number |
24 | * @chip: pointer to irq chip description structure | 24 | * @chip: pointer to irq chip description structure |
25 | */ | 25 | */ |
26 | int set_irq_chip(unsigned int irq, struct irq_chip *chip) | 26 | int irq_set_chip(unsigned int irq, struct irq_chip *chip) |
27 | { | 27 | { |
28 | struct irq_desc *desc = irq_to_desc(irq); | ||
29 | unsigned long flags; | 28 | unsigned long flags; |
29 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
30 | 30 | ||
31 | if (!desc) { | 31 | if (!desc) |
32 | WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); | ||
33 | return -EINVAL; | 32 | return -EINVAL; |
34 | } | ||
35 | 33 | ||
36 | if (!chip) | 34 | if (!chip) |
37 | chip = &no_irq_chip; | 35 | chip = &no_irq_chip; |
38 | 36 | ||
39 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
40 | irq_chip_set_defaults(chip); | 37 | irq_chip_set_defaults(chip); |
41 | desc->irq_data.chip = chip; | 38 | desc->irq_data.chip = chip; |
42 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 39 | irq_put_desc_unlock(desc, flags); |
43 | |||
44 | return 0; | 40 | return 0; |
45 | } | 41 | } |
46 | EXPORT_SYMBOL(set_irq_chip); | 42 | EXPORT_SYMBOL(irq_set_chip); |
47 | 43 | ||
48 | /** | 44 | /** |
49 | * set_irq_type - set the irq trigger type for an irq | 45 | * irq_set_type - set the irq trigger type for an irq |
50 | * @irq: irq number | 46 | * @irq: irq number |
51 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h | 47 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h |
52 | */ | 48 | */ |
53 | int set_irq_type(unsigned int irq, unsigned int type) | 49 | int irq_set_irq_type(unsigned int irq, unsigned int type) |
54 | { | 50 | { |
55 | struct irq_desc *desc = irq_to_desc(irq); | ||
56 | unsigned long flags; | 51 | unsigned long flags; |
57 | int ret = -ENXIO; | 52 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); |
53 | int ret = 0; | ||
58 | 54 | ||
59 | if (!desc) { | 55 | if (!desc) |
60 | printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); | 56 | return -EINVAL; |
61 | return -ENODEV; | ||
62 | } | ||
63 | 57 | ||
64 | type &= IRQ_TYPE_SENSE_MASK; | 58 | type &= IRQ_TYPE_SENSE_MASK; |
65 | if (type == IRQ_TYPE_NONE) | 59 | if (type != IRQ_TYPE_NONE) |
66 | return 0; | 60 | ret = __irq_set_trigger(desc, irq, type); |
67 | 61 | irq_put_desc_busunlock(desc, flags); | |
68 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
69 | ret = __irq_set_trigger(desc, irq, type); | ||
70 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
71 | return ret; | 62 | return ret; |
72 | } | 63 | } |
73 | EXPORT_SYMBOL(set_irq_type); | 64 | EXPORT_SYMBOL(irq_set_irq_type); |
74 | 65 | ||
75 | /** | 66 | /** |
76 | * set_irq_data - set irq type data for an irq | 67 | * irq_set_handler_data - set irq handler data for an irq |
77 | * @irq: Interrupt number | 68 | * @irq: Interrupt number |
78 | * @data: Pointer to interrupt specific data | 69 | * @data: Pointer to interrupt specific data |
79 | * | 70 | * |
80 | * Set the hardware irq controller data for an irq | 71 | * Set the hardware irq controller data for an irq |
81 | */ | 72 | */ |
82 | int set_irq_data(unsigned int irq, void *data) | 73 | int irq_set_handler_data(unsigned int irq, void *data) |
83 | { | 74 | { |
84 | struct irq_desc *desc = irq_to_desc(irq); | ||
85 | unsigned long flags; | 75 | unsigned long flags; |
76 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
86 | 77 | ||
87 | if (!desc) { | 78 | if (!desc) |
88 | printk(KERN_ERR | ||
89 | "Trying to install controller data for IRQ%d\n", irq); | ||
90 | return -EINVAL; | 79 | return -EINVAL; |
91 | } | ||
92 | |||
93 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
94 | desc->irq_data.handler_data = data; | 80 | desc->irq_data.handler_data = data; |
95 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 81 | irq_put_desc_unlock(desc, flags); |
96 | return 0; | 82 | return 0; |
97 | } | 83 | } |
98 | EXPORT_SYMBOL(set_irq_data); | 84 | EXPORT_SYMBOL(irq_set_handler_data); |
99 | 85 | ||
100 | /** | 86 | /** |
101 | * set_irq_msi - set MSI descriptor data for an irq | 87 | * irq_set_msi_desc - set MSI descriptor data for an irq |
102 | * @irq: Interrupt number | 88 | * @irq: Interrupt number |
103 | * @entry: Pointer to MSI descriptor data | 89 | * @entry: Pointer to MSI descriptor data |
104 | * | 90 | * |
105 | * Set the MSI descriptor entry for an irq | 91 | * Set the MSI descriptor entry for an irq |
106 | */ | 92 | */ |
107 | int set_irq_msi(unsigned int irq, struct msi_desc *entry) | 93 | int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) |
108 | { | 94 | { |
109 | struct irq_desc *desc = irq_to_desc(irq); | ||
110 | unsigned long flags; | 95 | unsigned long flags; |
96 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
111 | 97 | ||
112 | if (!desc) { | 98 | if (!desc) |
113 | printk(KERN_ERR | ||
114 | "Trying to install msi data for IRQ%d\n", irq); | ||
115 | return -EINVAL; | 99 | return -EINVAL; |
116 | } | ||
117 | |||
118 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
119 | desc->irq_data.msi_desc = entry; | 100 | desc->irq_data.msi_desc = entry; |
120 | if (entry) | 101 | if (entry) |
121 | entry->irq = irq; | 102 | entry->irq = irq; |
122 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 103 | irq_put_desc_unlock(desc, flags); |
123 | return 0; | 104 | return 0; |
124 | } | 105 | } |
125 | 106 | ||
126 | /** | 107 | /** |
127 | * set_irq_chip_data - set irq chip data for an irq | 108 | * irq_set_chip_data - set irq chip data for an irq |
128 | * @irq: Interrupt number | 109 | * @irq: Interrupt number |
129 | * @data: Pointer to chip specific data | 110 | * @data: Pointer to chip specific data |
130 | * | 111 | * |
131 | * Set the hardware irq chip data for an irq | 112 | * Set the hardware irq chip data for an irq |
132 | */ | 113 | */ |
133 | int set_irq_chip_data(unsigned int irq, void *data) | 114 | int irq_set_chip_data(unsigned int irq, void *data) |
134 | { | 115 | { |
135 | struct irq_desc *desc = irq_to_desc(irq); | ||
136 | unsigned long flags; | 116 | unsigned long flags; |
117 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
137 | 118 | ||
138 | if (!desc) { | 119 | if (!desc) |
139 | printk(KERN_ERR | ||
140 | "Trying to install chip data for IRQ%d\n", irq); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | if (!desc->irq_data.chip) { | ||
145 | printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); | ||
146 | return -EINVAL; | 120 | return -EINVAL; |
147 | } | ||
148 | |||
149 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
150 | desc->irq_data.chip_data = data; | 121 | desc->irq_data.chip_data = data; |
151 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 122 | irq_put_desc_unlock(desc, flags); |
152 | |||
153 | return 0; | 123 | return 0; |
154 | } | 124 | } |
155 | EXPORT_SYMBOL(set_irq_chip_data); | 125 | EXPORT_SYMBOL(irq_set_chip_data); |
156 | 126 | ||
157 | struct irq_data *irq_get_irq_data(unsigned int irq) | 127 | struct irq_data *irq_get_irq_data(unsigned int irq) |
158 | { | 128 | { |
@@ -162,72 +132,75 @@ struct irq_data *irq_get_irq_data(unsigned int irq) | |||
162 | } | 132 | } |
163 | EXPORT_SYMBOL_GPL(irq_get_irq_data); | 133 | EXPORT_SYMBOL_GPL(irq_get_irq_data); |
164 | 134 | ||
165 | /** | 135 | static void irq_state_clr_disabled(struct irq_desc *desc) |
166 | * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq | ||
167 | * | ||
168 | * @irq: Interrupt number | ||
169 | * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag | ||
170 | * | ||
171 | * The IRQ_NESTED_THREAD flag indicates that on | ||
172 | * request_threaded_irq() no separate interrupt thread should be | ||
173 | * created for the irq as the handler are called nested in the | ||
174 | * context of a demultiplexing interrupt handler thread. | ||
175 | */ | ||
176 | void set_irq_nested_thread(unsigned int irq, int nest) | ||
177 | { | 136 | { |
178 | struct irq_desc *desc = irq_to_desc(irq); | 137 | desc->istate &= ~IRQS_DISABLED; |
179 | unsigned long flags; | 138 | irq_compat_clr_disabled(desc); |
180 | |||
181 | if (!desc) | ||
182 | return; | ||
183 | |||
184 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
185 | if (nest) | ||
186 | desc->status |= IRQ_NESTED_THREAD; | ||
187 | else | ||
188 | desc->status &= ~IRQ_NESTED_THREAD; | ||
189 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
190 | } | 139 | } |
191 | EXPORT_SYMBOL_GPL(set_irq_nested_thread); | ||
192 | 140 | ||
193 | /* | 141 | static void irq_state_set_disabled(struct irq_desc *desc) |
194 | * default enable function | ||
195 | */ | ||
196 | static void default_enable(struct irq_data *data) | ||
197 | { | 142 | { |
198 | struct irq_desc *desc = irq_data_to_desc(data); | 143 | desc->istate |= IRQS_DISABLED; |
144 | irq_compat_set_disabled(desc); | ||
145 | } | ||
199 | 146 | ||
200 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 147 | static void irq_state_clr_masked(struct irq_desc *desc) |
201 | desc->status &= ~IRQ_MASKED; | 148 | { |
149 | desc->istate &= ~IRQS_MASKED; | ||
150 | irq_compat_clr_masked(desc); | ||
202 | } | 151 | } |
203 | 152 | ||
204 | /* | 153 | static void irq_state_set_masked(struct irq_desc *desc) |
205 | * default disable function | ||
206 | */ | ||
207 | static void default_disable(struct irq_data *data) | ||
208 | { | 154 | { |
155 | desc->istate |= IRQS_MASKED; | ||
156 | irq_compat_set_masked(desc); | ||
209 | } | 157 | } |
210 | 158 | ||
211 | /* | 159 | int irq_startup(struct irq_desc *desc) |
212 | * default startup function | ||
213 | */ | ||
214 | static unsigned int default_startup(struct irq_data *data) | ||
215 | { | 160 | { |
216 | struct irq_desc *desc = irq_data_to_desc(data); | 161 | irq_state_clr_disabled(desc); |
162 | desc->depth = 0; | ||
163 | |||
164 | if (desc->irq_data.chip->irq_startup) { | ||
165 | int ret = desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
166 | irq_state_clr_masked(desc); | ||
167 | return ret; | ||
168 | } | ||
217 | 169 | ||
218 | desc->irq_data.chip->irq_enable(data); | 170 | irq_enable(desc); |
219 | return 0; | 171 | return 0; |
220 | } | 172 | } |
221 | 173 | ||
222 | /* | 174 | void irq_shutdown(struct irq_desc *desc) |
223 | * default shutdown function | ||
224 | */ | ||
225 | static void default_shutdown(struct irq_data *data) | ||
226 | { | 175 | { |
227 | struct irq_desc *desc = irq_data_to_desc(data); | 176 | irq_state_set_disabled(desc); |
177 | desc->depth = 1; | ||
178 | if (desc->irq_data.chip->irq_shutdown) | ||
179 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | ||
180 | if (desc->irq_data.chip->irq_disable) | ||
181 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
182 | else | ||
183 | desc->irq_data.chip->irq_mask(&desc->irq_data); | ||
184 | irq_state_set_masked(desc); | ||
185 | } | ||
228 | 186 | ||
229 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 187 | void irq_enable(struct irq_desc *desc) |
230 | desc->status |= IRQ_MASKED; | 188 | { |
189 | irq_state_clr_disabled(desc); | ||
190 | if (desc->irq_data.chip->irq_enable) | ||
191 | desc->irq_data.chip->irq_enable(&desc->irq_data); | ||
192 | else | ||
193 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | ||
194 | irq_state_clr_masked(desc); | ||
195 | } | ||
196 | |||
197 | void irq_disable(struct irq_desc *desc) | ||
198 | { | ||
199 | irq_state_set_disabled(desc); | ||
200 | if (desc->irq_data.chip->irq_disable) { | ||
201 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
202 | irq_state_set_masked(desc); | ||
203 | } | ||
231 | } | 204 | } |
232 | 205 | ||
233 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | 206 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
@@ -315,10 +288,6 @@ static void compat_bus_sync_unlock(struct irq_data *data) | |||
315 | void irq_chip_set_defaults(struct irq_chip *chip) | 288 | void irq_chip_set_defaults(struct irq_chip *chip) |
316 | { | 289 | { |
317 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | 290 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
318 | /* | ||
319 | * Compat fixup functions need to be before we set the | ||
320 | * defaults for enable/disable/startup/shutdown | ||
321 | */ | ||
322 | if (chip->enable) | 291 | if (chip->enable) |
323 | chip->irq_enable = compat_irq_enable; | 292 | chip->irq_enable = compat_irq_enable; |
324 | if (chip->disable) | 293 | if (chip->disable) |
@@ -327,33 +296,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) | |||
327 | chip->irq_shutdown = compat_irq_shutdown; | 296 | chip->irq_shutdown = compat_irq_shutdown; |
328 | if (chip->startup) | 297 | if (chip->startup) |
329 | chip->irq_startup = compat_irq_startup; | 298 | chip->irq_startup = compat_irq_startup; |
330 | #endif | ||
331 | /* | ||
332 | * The real defaults | ||
333 | */ | ||
334 | if (!chip->irq_enable) | ||
335 | chip->irq_enable = default_enable; | ||
336 | if (!chip->irq_disable) | ||
337 | chip->irq_disable = default_disable; | ||
338 | if (!chip->irq_startup) | ||
339 | chip->irq_startup = default_startup; | ||
340 | /* | ||
341 | * We use chip->irq_disable, when the user provided its own. When | ||
342 | * we have default_disable set for chip->irq_disable, then we need | ||
343 | * to use default_shutdown, otherwise the irq line is not | ||
344 | * disabled on free_irq(): | ||
345 | */ | ||
346 | if (!chip->irq_shutdown) | ||
347 | chip->irq_shutdown = chip->irq_disable != default_disable ? | ||
348 | chip->irq_disable : default_shutdown; | ||
349 | |||
350 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
351 | if (!chip->end) | 299 | if (!chip->end) |
352 | chip->end = dummy_irq_chip.end; | 300 | chip->end = dummy_irq_chip.end; |
353 | |||
354 | /* | ||
355 | * Now fix up the remaining compat handlers | ||
356 | */ | ||
357 | if (chip->bus_lock) | 301 | if (chip->bus_lock) |
358 | chip->irq_bus_lock = compat_bus_lock; | 302 | chip->irq_bus_lock = compat_bus_lock; |
359 | if (chip->bus_sync_unlock) | 303 | if (chip->bus_sync_unlock) |
@@ -388,22 +332,22 @@ static inline void mask_ack_irq(struct irq_desc *desc) | |||
388 | if (desc->irq_data.chip->irq_ack) | 332 | if (desc->irq_data.chip->irq_ack) |
389 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 333 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
390 | } | 334 | } |
391 | desc->status |= IRQ_MASKED; | 335 | irq_state_set_masked(desc); |
392 | } | 336 | } |
393 | 337 | ||
394 | static inline void mask_irq(struct irq_desc *desc) | 338 | void mask_irq(struct irq_desc *desc) |
395 | { | 339 | { |
396 | if (desc->irq_data.chip->irq_mask) { | 340 | if (desc->irq_data.chip->irq_mask) { |
397 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 341 | desc->irq_data.chip->irq_mask(&desc->irq_data); |
398 | desc->status |= IRQ_MASKED; | 342 | irq_state_set_masked(desc); |
399 | } | 343 | } |
400 | } | 344 | } |
401 | 345 | ||
402 | static inline void unmask_irq(struct irq_desc *desc) | 346 | void unmask_irq(struct irq_desc *desc) |
403 | { | 347 | { |
404 | if (desc->irq_data.chip->irq_unmask) { | 348 | if (desc->irq_data.chip->irq_unmask) { |
405 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 349 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
406 | desc->status &= ~IRQ_MASKED; | 350 | irq_state_clr_masked(desc); |
407 | } | 351 | } |
408 | } | 352 | } |
409 | 353 | ||
@@ -428,10 +372,11 @@ void handle_nested_irq(unsigned int irq) | |||
428 | kstat_incr_irqs_this_cpu(irq, desc); | 372 | kstat_incr_irqs_this_cpu(irq, desc); |
429 | 373 | ||
430 | action = desc->action; | 374 | action = desc->action; |
431 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | 375 | if (unlikely(!action || (desc->istate & IRQS_DISABLED))) |
432 | goto out_unlock; | 376 | goto out_unlock; |
433 | 377 | ||
434 | desc->status |= IRQ_INPROGRESS; | 378 | irq_compat_set_progress(desc); |
379 | desc->istate |= IRQS_INPROGRESS; | ||
435 | raw_spin_unlock_irq(&desc->lock); | 380 | raw_spin_unlock_irq(&desc->lock); |
436 | 381 | ||
437 | action_ret = action->thread_fn(action->irq, action->dev_id); | 382 | action_ret = action->thread_fn(action->irq, action->dev_id); |
@@ -439,13 +384,21 @@ void handle_nested_irq(unsigned int irq) | |||
439 | note_interrupt(irq, desc, action_ret); | 384 | note_interrupt(irq, desc, action_ret); |
440 | 385 | ||
441 | raw_spin_lock_irq(&desc->lock); | 386 | raw_spin_lock_irq(&desc->lock); |
442 | desc->status &= ~IRQ_INPROGRESS; | 387 | desc->istate &= ~IRQS_INPROGRESS; |
388 | irq_compat_clr_progress(desc); | ||
443 | 389 | ||
444 | out_unlock: | 390 | out_unlock: |
445 | raw_spin_unlock_irq(&desc->lock); | 391 | raw_spin_unlock_irq(&desc->lock); |
446 | } | 392 | } |
447 | EXPORT_SYMBOL_GPL(handle_nested_irq); | 393 | EXPORT_SYMBOL_GPL(handle_nested_irq); |
448 | 394 | ||
395 | static bool irq_check_poll(struct irq_desc *desc) | ||
396 | { | ||
397 | if (!(desc->istate & IRQS_POLL_INPROGRESS)) | ||
398 | return false; | ||
399 | return irq_wait_for_poll(desc); | ||
400 | } | ||
401 | |||
449 | /** | 402 | /** |
450 | * handle_simple_irq - Simple and software-decoded IRQs. | 403 | * handle_simple_irq - Simple and software-decoded IRQs. |
451 | * @irq: the interrupt number | 404 | * @irq: the interrupt number |
@@ -461,29 +414,20 @@ EXPORT_SYMBOL_GPL(handle_nested_irq); | |||
461 | void | 414 | void |
462 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) | 415 | handle_simple_irq(unsigned int irq, struct irq_desc *desc) |
463 | { | 416 | { |
464 | struct irqaction *action; | ||
465 | irqreturn_t action_ret; | ||
466 | |||
467 | raw_spin_lock(&desc->lock); | 417 | raw_spin_lock(&desc->lock); |
468 | 418 | ||
469 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 419 | if (unlikely(desc->istate & IRQS_INPROGRESS)) |
470 | goto out_unlock; | 420 | if (!irq_check_poll(desc)) |
471 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 421 | goto out_unlock; |
422 | |||
423 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | ||
472 | kstat_incr_irqs_this_cpu(irq, desc); | 424 | kstat_incr_irqs_this_cpu(irq, desc); |
473 | 425 | ||
474 | action = desc->action; | 426 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) |
475 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | ||
476 | goto out_unlock; | 427 | goto out_unlock; |
477 | 428 | ||
478 | desc->status |= IRQ_INPROGRESS; | 429 | handle_irq_event(desc); |
479 | raw_spin_unlock(&desc->lock); | ||
480 | 430 | ||
481 | action_ret = handle_IRQ_event(irq, action); | ||
482 | if (!noirqdebug) | ||
483 | note_interrupt(irq, desc, action_ret); | ||
484 | |||
485 | raw_spin_lock(&desc->lock); | ||
486 | desc->status &= ~IRQ_INPROGRESS; | ||
487 | out_unlock: | 431 | out_unlock: |
488 | raw_spin_unlock(&desc->lock); | 432 | raw_spin_unlock(&desc->lock); |
489 | } | 433 | } |
@@ -501,42 +445,42 @@ out_unlock: | |||
501 | void | 445 | void |
502 | handle_level_irq(unsigned int irq, struct irq_desc *desc) | 446 | handle_level_irq(unsigned int irq, struct irq_desc *desc) |
503 | { | 447 | { |
504 | struct irqaction *action; | ||
505 | irqreturn_t action_ret; | ||
506 | |||
507 | raw_spin_lock(&desc->lock); | 448 | raw_spin_lock(&desc->lock); |
508 | mask_ack_irq(desc); | 449 | mask_ack_irq(desc); |
509 | 450 | ||
510 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 451 | if (unlikely(desc->istate & IRQS_INPROGRESS)) |
511 | goto out_unlock; | 452 | if (!irq_check_poll(desc)) |
512 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 453 | goto out_unlock; |
454 | |||
455 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | ||
513 | kstat_incr_irqs_this_cpu(irq, desc); | 456 | kstat_incr_irqs_this_cpu(irq, desc); |
514 | 457 | ||
515 | /* | 458 | /* |
516 | * If its disabled or no action available | 459 | * If its disabled or no action available |
517 | * keep it masked and get out of here | 460 | * keep it masked and get out of here |
518 | */ | 461 | */ |
519 | action = desc->action; | 462 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) |
520 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) | ||
521 | goto out_unlock; | 463 | goto out_unlock; |
522 | 464 | ||
523 | desc->status |= IRQ_INPROGRESS; | 465 | handle_irq_event(desc); |
524 | raw_spin_unlock(&desc->lock); | ||
525 | |||
526 | action_ret = handle_IRQ_event(irq, action); | ||
527 | if (!noirqdebug) | ||
528 | note_interrupt(irq, desc, action_ret); | ||
529 | 466 | ||
530 | raw_spin_lock(&desc->lock); | 467 | if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT))) |
531 | desc->status &= ~IRQ_INPROGRESS; | ||
532 | |||
533 | if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) | ||
534 | unmask_irq(desc); | 468 | unmask_irq(desc); |
535 | out_unlock: | 469 | out_unlock: |
536 | raw_spin_unlock(&desc->lock); | 470 | raw_spin_unlock(&desc->lock); |
537 | } | 471 | } |
538 | EXPORT_SYMBOL_GPL(handle_level_irq); | 472 | EXPORT_SYMBOL_GPL(handle_level_irq); |
539 | 473 | ||
474 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | ||
475 | static inline void preflow_handler(struct irq_desc *desc) | ||
476 | { | ||
477 | if (desc->preflow_handler) | ||
478 | desc->preflow_handler(&desc->irq_data); | ||
479 | } | ||
480 | #else | ||
481 | static inline void preflow_handler(struct irq_desc *desc) { } | ||
482 | #endif | ||
483 | |||
540 | /** | 484 | /** |
541 | * handle_fasteoi_irq - irq handler for transparent controllers | 485 | * handle_fasteoi_irq - irq handler for transparent controllers |
542 | * @irq: the interrupt number | 486 | * @irq: the interrupt number |
@@ -550,42 +494,41 @@ EXPORT_SYMBOL_GPL(handle_level_irq); | |||
550 | void | 494 | void |
551 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | 495 | handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) |
552 | { | 496 | { |
553 | struct irqaction *action; | ||
554 | irqreturn_t action_ret; | ||
555 | |||
556 | raw_spin_lock(&desc->lock); | 497 | raw_spin_lock(&desc->lock); |
557 | 498 | ||
558 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 499 | if (unlikely(desc->istate & IRQS_INPROGRESS)) |
559 | goto out; | 500 | if (!irq_check_poll(desc)) |
501 | goto out; | ||
560 | 502 | ||
561 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 503 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
562 | kstat_incr_irqs_this_cpu(irq, desc); | 504 | kstat_incr_irqs_this_cpu(irq, desc); |
563 | 505 | ||
564 | /* | 506 | /* |
565 | * If its disabled or no action available | 507 | * If its disabled or no action available |
566 | * then mask it and get out of here: | 508 | * then mask it and get out of here: |
567 | */ | 509 | */ |
568 | action = desc->action; | 510 | if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) { |
569 | if (unlikely(!action || (desc->status & IRQ_DISABLED))) { | 511 | irq_compat_set_pending(desc); |
570 | desc->status |= IRQ_PENDING; | 512 | desc->istate |= IRQS_PENDING; |
571 | mask_irq(desc); | 513 | mask_irq(desc); |
572 | goto out; | 514 | goto out; |
573 | } | 515 | } |
574 | 516 | ||
575 | desc->status |= IRQ_INPROGRESS; | 517 | if (desc->istate & IRQS_ONESHOT) |
576 | desc->status &= ~IRQ_PENDING; | 518 | mask_irq(desc); |
577 | raw_spin_unlock(&desc->lock); | ||
578 | 519 | ||
579 | action_ret = handle_IRQ_event(irq, action); | 520 | preflow_handler(desc); |
580 | if (!noirqdebug) | 521 | handle_irq_event(desc); |
581 | note_interrupt(irq, desc, action_ret); | ||
582 | 522 | ||
583 | raw_spin_lock(&desc->lock); | 523 | out_eoi: |
584 | desc->status &= ~IRQ_INPROGRESS; | ||
585 | out: | ||
586 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | 524 | desc->irq_data.chip->irq_eoi(&desc->irq_data); |
587 | 525 | out_unlock: | |
588 | raw_spin_unlock(&desc->lock); | 526 | raw_spin_unlock(&desc->lock); |
527 | return; | ||
528 | out: | ||
529 | if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED)) | ||
530 | goto out_eoi; | ||
531 | goto out_unlock; | ||
589 | } | 532 | } |
590 | 533 | ||
591 | /** | 534 | /** |
@@ -609,32 +552,28 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
609 | { | 552 | { |
610 | raw_spin_lock(&desc->lock); | 553 | raw_spin_lock(&desc->lock); |
611 | 554 | ||
612 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | 555 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
613 | |||
614 | /* | 556 | /* |
615 | * If we're currently running this IRQ, or its disabled, | 557 | * If we're currently running this IRQ, or its disabled, |
616 | * we shouldn't process the IRQ. Mark it pending, handle | 558 | * we shouldn't process the IRQ. Mark it pending, handle |
617 | * the necessary masking and go out | 559 | * the necessary masking and go out |
618 | */ | 560 | */ |
619 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || | 561 | if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) || |
620 | !desc->action)) { | 562 | !desc->action))) { |
621 | desc->status |= (IRQ_PENDING | IRQ_MASKED); | 563 | if (!irq_check_poll(desc)) { |
622 | mask_ack_irq(desc); | 564 | irq_compat_set_pending(desc); |
623 | goto out_unlock; | 565 | desc->istate |= IRQS_PENDING; |
566 | mask_ack_irq(desc); | ||
567 | goto out_unlock; | ||
568 | } | ||
624 | } | 569 | } |
625 | kstat_incr_irqs_this_cpu(irq, desc); | 570 | kstat_incr_irqs_this_cpu(irq, desc); |
626 | 571 | ||
627 | /* Start handling the irq */ | 572 | /* Start handling the irq */ |
628 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 573 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
629 | 574 | ||
630 | /* Mark the IRQ currently in progress.*/ | ||
631 | desc->status |= IRQ_INPROGRESS; | ||
632 | |||
633 | do { | 575 | do { |
634 | struct irqaction *action = desc->action; | 576 | if (unlikely(!desc->action)) { |
635 | irqreturn_t action_ret; | ||
636 | |||
637 | if (unlikely(!action)) { | ||
638 | mask_irq(desc); | 577 | mask_irq(desc); |
639 | goto out_unlock; | 578 | goto out_unlock; |
640 | } | 579 | } |
@@ -644,22 +583,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
644 | * one, we could have masked the irq. | 583 | * one, we could have masked the irq. |
645 | * Renable it, if it was not disabled in meantime. | 584 | * Renable it, if it was not disabled in meantime. |
646 | */ | 585 | */ |
647 | if (unlikely((desc->status & | 586 | if (unlikely(desc->istate & IRQS_PENDING)) { |
648 | (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == | 587 | if (!(desc->istate & IRQS_DISABLED) && |
649 | (IRQ_PENDING | IRQ_MASKED))) { | 588 | (desc->istate & IRQS_MASKED)) |
650 | unmask_irq(desc); | 589 | unmask_irq(desc); |
651 | } | 590 | } |
652 | 591 | ||
653 | desc->status &= ~IRQ_PENDING; | 592 | handle_irq_event(desc); |
654 | raw_spin_unlock(&desc->lock); | ||
655 | action_ret = handle_IRQ_event(irq, action); | ||
656 | if (!noirqdebug) | ||
657 | note_interrupt(irq, desc, action_ret); | ||
658 | raw_spin_lock(&desc->lock); | ||
659 | 593 | ||
660 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); | 594 | } while ((desc->istate & IRQS_PENDING) && |
595 | !(desc->istate & IRQS_DISABLED)); | ||
661 | 596 | ||
662 | desc->status &= ~IRQ_INPROGRESS; | ||
663 | out_unlock: | 597 | out_unlock: |
664 | raw_spin_unlock(&desc->lock); | 598 | raw_spin_unlock(&desc->lock); |
665 | } | 599 | } |
@@ -674,103 +608,84 @@ out_unlock: | |||
674 | void | 608 | void |
675 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | 609 | handle_percpu_irq(unsigned int irq, struct irq_desc *desc) |
676 | { | 610 | { |
677 | irqreturn_t action_ret; | 611 | struct irq_chip *chip = irq_desc_get_chip(desc); |
678 | 612 | ||
679 | kstat_incr_irqs_this_cpu(irq, desc); | 613 | kstat_incr_irqs_this_cpu(irq, desc); |
680 | 614 | ||
681 | if (desc->irq_data.chip->irq_ack) | 615 | if (chip->irq_ack) |
682 | desc->irq_data.chip->irq_ack(&desc->irq_data); | 616 | chip->irq_ack(&desc->irq_data); |
683 | 617 | ||
684 | action_ret = handle_IRQ_event(irq, desc->action); | 618 | handle_irq_event_percpu(desc, desc->action); |
685 | if (!noirqdebug) | ||
686 | note_interrupt(irq, desc, action_ret); | ||
687 | 619 | ||
688 | if (desc->irq_data.chip->irq_eoi) | 620 | if (chip->irq_eoi) |
689 | desc->irq_data.chip->irq_eoi(&desc->irq_data); | 621 | chip->irq_eoi(&desc->irq_data); |
690 | } | 622 | } |
691 | 623 | ||
692 | void | 624 | void |
693 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 625 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
694 | const char *name) | 626 | const char *name) |
695 | { | 627 | { |
696 | struct irq_desc *desc = irq_to_desc(irq); | ||
697 | unsigned long flags; | 628 | unsigned long flags; |
629 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
698 | 630 | ||
699 | if (!desc) { | 631 | if (!desc) |
700 | printk(KERN_ERR | ||
701 | "Trying to install type control for IRQ%d\n", irq); | ||
702 | return; | 632 | return; |
703 | } | ||
704 | 633 | ||
705 | if (!handle) | 634 | if (!handle) { |
706 | handle = handle_bad_irq; | 635 | handle = handle_bad_irq; |
707 | else if (desc->irq_data.chip == &no_irq_chip) { | 636 | } else { |
708 | printk(KERN_WARNING "Trying to install %sinterrupt handler " | 637 | if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) |
709 | "for IRQ%d\n", is_chained ? "chained " : "", irq); | 638 | goto out; |
710 | /* | ||
711 | * Some ARM implementations install a handler for really dumb | ||
712 | * interrupt hardware without setting an irq_chip. This worked | ||
713 | * with the ARM no_irq_chip but the check in setup_irq would | ||
714 | * prevent us to setup the interrupt at all. Switch it to | ||
715 | * dummy_irq_chip for easy transition. | ||
716 | */ | ||
717 | desc->irq_data.chip = &dummy_irq_chip; | ||
718 | } | 639 | } |
719 | 640 | ||
720 | chip_bus_lock(desc); | ||
721 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
722 | |||
723 | /* Uninstall? */ | 641 | /* Uninstall? */ |
724 | if (handle == handle_bad_irq) { | 642 | if (handle == handle_bad_irq) { |
725 | if (desc->irq_data.chip != &no_irq_chip) | 643 | if (desc->irq_data.chip != &no_irq_chip) |
726 | mask_ack_irq(desc); | 644 | mask_ack_irq(desc); |
727 | desc->status |= IRQ_DISABLED; | 645 | irq_compat_set_disabled(desc); |
646 | desc->istate |= IRQS_DISABLED; | ||
728 | desc->depth = 1; | 647 | desc->depth = 1; |
729 | } | 648 | } |
730 | desc->handle_irq = handle; | 649 | desc->handle_irq = handle; |
731 | desc->name = name; | 650 | desc->name = name; |
732 | 651 | ||
733 | if (handle != handle_bad_irq && is_chained) { | 652 | if (handle != handle_bad_irq && is_chained) { |
734 | desc->status &= ~IRQ_DISABLED; | 653 | irq_settings_set_noprobe(desc); |
735 | desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; | 654 | irq_settings_set_norequest(desc); |
736 | desc->depth = 0; | 655 | irq_startup(desc); |
737 | desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
738 | } | 656 | } |
739 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 657 | out: |
740 | chip_bus_sync_unlock(desc); | 658 | irq_put_desc_busunlock(desc, flags); |
741 | } | ||
742 | EXPORT_SYMBOL_GPL(__set_irq_handler); | ||
743 | |||
744 | void | ||
745 | set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | ||
746 | irq_flow_handler_t handle) | ||
747 | { | ||
748 | set_irq_chip(irq, chip); | ||
749 | __set_irq_handler(irq, handle, 0, NULL); | ||
750 | } | 659 | } |
660 | EXPORT_SYMBOL_GPL(__irq_set_handler); | ||
751 | 661 | ||
752 | void | 662 | void |
753 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | 663 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
754 | irq_flow_handler_t handle, const char *name) | 664 | irq_flow_handler_t handle, const char *name) |
755 | { | 665 | { |
756 | set_irq_chip(irq, chip); | 666 | irq_set_chip(irq, chip); |
757 | __set_irq_handler(irq, handle, 0, name); | 667 | __irq_set_handler(irq, handle, 0, name); |
758 | } | 668 | } |
759 | 669 | ||
760 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | 670 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
761 | { | 671 | { |
762 | struct irq_desc *desc = irq_to_desc(irq); | ||
763 | unsigned long flags; | 672 | unsigned long flags; |
673 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
764 | 674 | ||
765 | if (!desc) | 675 | if (!desc) |
766 | return; | 676 | return; |
677 | irq_settings_clr_and_set(desc, clr, set); | ||
678 | |||
679 | irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | | ||
680 | IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); | ||
681 | if (irq_settings_has_no_balance_set(desc)) | ||
682 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | ||
683 | if (irq_settings_is_per_cpu(desc)) | ||
684 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | ||
685 | if (irq_settings_can_move_pcntxt(desc)) | ||
686 | irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); | ||
767 | 687 | ||
768 | /* Sanitize flags */ | 688 | irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); |
769 | set &= IRQF_MODIFY_MASK; | ||
770 | clr &= IRQF_MODIFY_MASK; | ||
771 | 689 | ||
772 | raw_spin_lock_irqsave(&desc->lock, flags); | 690 | irq_put_desc_unlock(desc, flags); |
773 | desc->status &= ~clr; | ||
774 | desc->status |= set; | ||
775 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
776 | } | 691 | } |
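
The chip.c hunks above boil down to a renamed setter family (irq_set_chip(), irq_set_irq_type(), irq_set_handler_data(), irq_set_msi_desc(), irq_set_chip_data()) plus the new irq_startup()/irq_shutdown()/irq_enable()/irq_disable() helpers, all funnelled through irq_get_desc_lock()/irq_put_desc_unlock(). As a rough illustration only -- the "foo" controller, its callbacks and the chosen trigger type are invented for this sketch, not part of the patch -- a platform irq_chip wired up against the renamed API might look like:

    #include <linux/irq.h>

    /* Hypothetical controller callbacks; real ones would poke hardware. */
    static void foo_mask(struct irq_data *d)   { /* set mask bit for d->irq */ }
    static void foo_unmask(struct irq_data *d) { /* clear mask bit for d->irq */ }

    static struct irq_chip foo_chip = {
            .name       = "FOO",
            .irq_mask   = foo_mask,
            .irq_unmask = foo_unmask,
    };

    static void foo_map_irq(unsigned int irq, void *chip_priv)
    {
            irq_set_chip_and_handler_name(irq, &foo_chip, handle_level_irq, "level");
            irq_set_chip_data(irq, chip_priv);
            irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
    }

Note that the converted setters now simply return -EINVAL for a nonexistent descriptor instead of printing an error, which is the only behavioural difference visible to such a caller.
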
diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h new file mode 100644 index 000000000000..6bbaf66aca85 --- /dev/null +++ b/kernel/irq/compat.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * Compat layer for transition period | ||
3 | */ | ||
4 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
5 | static inline void irq_compat_set_progress(struct irq_desc *desc) | ||
6 | { | ||
7 | desc->status |= IRQ_INPROGRESS; | ||
8 | } | ||
9 | |||
10 | static inline void irq_compat_clr_progress(struct irq_desc *desc) | ||
11 | { | ||
12 | desc->status &= ~IRQ_INPROGRESS; | ||
13 | } | ||
14 | static inline void irq_compat_set_disabled(struct irq_desc *desc) | ||
15 | { | ||
16 | desc->status |= IRQ_DISABLED; | ||
17 | } | ||
18 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) | ||
19 | { | ||
20 | desc->status &= ~IRQ_DISABLED; | ||
21 | } | ||
22 | static inline void irq_compat_set_pending(struct irq_desc *desc) | ||
23 | { | ||
24 | desc->status |= IRQ_PENDING; | ||
25 | } | ||
26 | |||
27 | static inline void irq_compat_clr_pending(struct irq_desc *desc) | ||
28 | { | ||
29 | desc->status &= ~IRQ_PENDING; | ||
30 | } | ||
31 | static inline void irq_compat_set_masked(struct irq_desc *desc) | ||
32 | { | ||
33 | desc->status |= IRQ_MASKED; | ||
34 | } | ||
35 | |||
36 | static inline void irq_compat_clr_masked(struct irq_desc *desc) | ||
37 | { | ||
38 | desc->status &= ~IRQ_MASKED; | ||
39 | } | ||
40 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) | ||
41 | { | ||
42 | desc->status |= IRQ_MOVE_PENDING; | ||
43 | } | ||
44 | |||
45 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) | ||
46 | { | ||
47 | desc->status &= ~IRQ_MOVE_PENDING; | ||
48 | } | ||
49 | static inline void irq_compat_set_affinity(struct irq_desc *desc) | ||
50 | { | ||
51 | desc->status |= IRQ_AFFINITY_SET; | ||
52 | } | ||
53 | |||
54 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) | ||
55 | { | ||
56 | desc->status &= ~IRQ_AFFINITY_SET; | ||
57 | } | ||
58 | #else | ||
59 | static inline void irq_compat_set_progress(struct irq_desc *desc) { } | ||
60 | static inline void irq_compat_clr_progress(struct irq_desc *desc) { } | ||
61 | static inline void irq_compat_set_disabled(struct irq_desc *desc) { } | ||
62 | static inline void irq_compat_clr_disabled(struct irq_desc *desc) { } | ||
63 | static inline void irq_compat_set_pending(struct irq_desc *desc) { } | ||
64 | static inline void irq_compat_clr_pending(struct irq_desc *desc) { } | ||
65 | static inline void irq_compat_set_masked(struct irq_desc *desc) { } | ||
66 | static inline void irq_compat_clr_masked(struct irq_desc *desc) { } | ||
67 | static inline void irq_compat_set_move_pending(struct irq_desc *desc) { } | ||
68 | static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { } | ||
69 | static inline void irq_compat_set_affinity(struct irq_desc *desc) { } | ||
70 | static inline void irq_compat_clr_affinity(struct irq_desc *desc) { } | ||
71 | #endif | ||
72 | |||
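
compat.h exists only for the transition period: while CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set, every change to the new desc->istate word is mirrored into the legacy desc->status bits so that not-yet-converted code keeps a consistent view. A minimal sketch of the pairing the core code uses (this just restates what the chip.c hunks already do; the example_ name is invented):

    /* keep the legacy status word and the new istate word in sync */
    static void example_mark_pending(struct irq_desc *desc)
    {
            irq_compat_set_pending(desc);   /* legacy: desc->status |= IRQ_PENDING */
            desc->istate |= IRQS_PENDING;   /* new authoritative state bit */
    }

Once CONFIG_GENERIC_HARDIRQS_NO_COMPAT is selected, the shims compile to empty inlines and only the istate update remains.
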
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h new file mode 100644 index 000000000000..d1a33b7fa61d --- /dev/null +++ b/kernel/irq/debug.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Debugging printout: | ||
3 | */ | ||
4 | |||
5 | #include <linux/kallsyms.h> | ||
6 | |||
7 | #define P(f) if (desc->status & f) printk("%14s set\n", #f) | ||
8 | #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) | ||
9 | |||
10 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
11 | { | ||
12 | printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", | ||
13 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); | ||
14 | printk("->handle_irq(): %p, ", desc->handle_irq); | ||
15 | print_symbol("%s\n", (unsigned long)desc->handle_irq); | ||
16 | printk("->irq_data.chip(): %p, ", desc->irq_data.chip); | ||
17 | print_symbol("%s\n", (unsigned long)desc->irq_data.chip); | ||
18 | printk("->action(): %p\n", desc->action); | ||
19 | if (desc->action) { | ||
20 | printk("->action->handler(): %p, ", desc->action->handler); | ||
21 | print_symbol("%s\n", (unsigned long)desc->action->handler); | ||
22 | } | ||
23 | |||
24 | P(IRQ_LEVEL); | ||
25 | P(IRQ_PER_CPU); | ||
26 | P(IRQ_NOPROBE); | ||
27 | P(IRQ_NOREQUEST); | ||
28 | P(IRQ_NOAUTOEN); | ||
29 | |||
30 | PS(IRQS_AUTODETECT); | ||
31 | PS(IRQS_INPROGRESS); | ||
32 | PS(IRQS_REPLAY); | ||
33 | PS(IRQS_WAITING); | ||
34 | PS(IRQS_DISABLED); | ||
35 | PS(IRQS_PENDING); | ||
36 | PS(IRQS_MASKED); | ||
37 | } | ||
38 | |||
39 | #undef P | ||
40 | #undef PS | ||
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 3540a7190122..517561fc7317 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -51,30 +51,92 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action) | |||
51 | "but no thread function available.", irq, action->name); | 51 | "but no thread function available.", irq, action->name); |
52 | } | 52 | } |
53 | 53 | ||
54 | /** | 54 | static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) |
55 | * handle_IRQ_event - irq action chain handler | 55 | { |
56 | * @irq: the interrupt number | 56 | /* |
57 | * @action: the interrupt action chain for this irq | 57 | * Wake up the handler thread for this action. In case the |
58 | * | 58 | * thread crashed and was killed we just pretend that we |
59 | * Handles the action chain of an irq event | 59 | * handled the interrupt. The hardirq handler has disabled the |
60 | */ | 60 | * device interrupt, so no irq storm is lurking. If the |
61 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | 61 | * RUNTHREAD bit is already set, nothing to do. |
62 | */ | ||
63 | if (test_bit(IRQTF_DIED, &action->thread_flags) || | ||
64 | test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
65 | return; | ||
66 | |||
67 | /* | ||
68 | * It's safe to OR the mask lockless here. We have only two | ||
69 | * places which write to threads_oneshot: This code and the | ||
70 | * irq thread. | ||
71 | * | ||
72 | * This code is the hard irq context and can never run on two | ||
73 | * cpus in parallel. If it ever does we have more serious | ||
74 | * problems than this bitmask. | ||
75 | * | ||
76 | * The irq threads of this irq which clear their "running" bit | ||
77 | * in threads_oneshot are serialized via desc->lock against | ||
78 | * each other and they are serialized against this code by | ||
79 | * IRQS_INPROGRESS. | ||
80 | * | ||
81 | * Hard irq handler: | ||
82 | * | ||
83 | * spin_lock(desc->lock); | ||
84 | * desc->state |= IRQS_INPROGRESS; | ||
85 | * spin_unlock(desc->lock); | ||
86 | * set_bit(IRQTF_RUNTHREAD, &action->thread_flags); | ||
87 | * desc->threads_oneshot |= mask; | ||
88 | * spin_lock(desc->lock); | ||
89 | * desc->state &= ~IRQS_INPROGRESS; | ||
90 | * spin_unlock(desc->lock); | ||
91 | * | ||
92 | * irq thread: | ||
93 | * | ||
94 | * again: | ||
95 | * spin_lock(desc->lock); | ||
96 | * if (desc->state & IRQS_INPROGRESS) { | ||
97 | * spin_unlock(desc->lock); | ||
98 | * while(desc->state & IRQS_INPROGRESS) | ||
99 | * cpu_relax(); | ||
100 | * goto again; | ||
101 | * } | ||
102 | * if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
103 | * desc->threads_oneshot &= ~mask; | ||
104 | * spin_unlock(desc->lock); | ||
105 | * | ||
106 | * So either the thread waits for us to clear IRQS_INPROGRESS | ||
107 | * or we are waiting in the flow handler for desc->lock to be | ||
108 | * released before we reach this point. The thread also checks | ||
109 | * IRQTF_RUNTHREAD under desc->lock. If set it leaves | ||
110 | * threads_oneshot untouched and runs the thread another time. | ||
111 | */ | ||
112 | desc->threads_oneshot |= action->thread_mask; | ||
113 | wake_up_process(action->thread); | ||
114 | } | ||
115 | |||
116 | irqreturn_t | ||
117 | handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) | ||
62 | { | 118 | { |
63 | irqreturn_t ret, retval = IRQ_NONE; | 119 | irqreturn_t retval = IRQ_NONE; |
64 | unsigned int status = 0; | 120 | unsigned int random = 0, irq = desc->irq_data.irq; |
65 | 121 | ||
66 | do { | 122 | do { |
123 | irqreturn_t res; | ||
124 | |||
67 | trace_irq_handler_entry(irq, action); | 125 | trace_irq_handler_entry(irq, action); |
68 | ret = action->handler(irq, action->dev_id); | 126 | res = action->handler(irq, action->dev_id); |
69 | trace_irq_handler_exit(irq, action, ret); | 127 | trace_irq_handler_exit(irq, action, res); |
70 | 128 | ||
71 | switch (ret) { | 129 | if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n", |
130 | irq, action->handler)) | ||
131 | local_irq_disable(); | ||
132 | |||
133 | switch (res) { | ||
72 | case IRQ_WAKE_THREAD: | 134 | case IRQ_WAKE_THREAD: |
73 | /* | 135 | /* |
74 | * Set result to handled so the spurious check | 136 | * Set result to handled so the spurious check |
75 | * does not trigger. | 137 | * does not trigger. |
76 | */ | 138 | */ |
77 | ret = IRQ_HANDLED; | 139 | res = IRQ_HANDLED; |
78 | 140 | ||
79 | /* | 141 | /* |
80 | * Catch drivers which return WAKE_THREAD but | 142 | * Catch drivers which return WAKE_THREAD but |
@@ -85,36 +147,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | |||
85 | break; | 147 | break; |
86 | } | 148 | } |
87 | 149 | ||
88 | /* | 150 | irq_wake_thread(desc, action); |
89 | * Wake up the handler thread for this | ||
90 | * action. In case the thread crashed and was | ||
91 | * killed we just pretend that we handled the | ||
92 | * interrupt. The hardirq handler above has | ||
93 | * disabled the device interrupt, so no irq | ||
94 | * storm is lurking. | ||
95 | */ | ||
96 | if (likely(!test_bit(IRQTF_DIED, | ||
97 | &action->thread_flags))) { | ||
98 | set_bit(IRQTF_RUNTHREAD, &action->thread_flags); | ||
99 | wake_up_process(action->thread); | ||
100 | } | ||
101 | 151 | ||
102 | /* Fall through to add to randomness */ | 152 | /* Fall through to add to randomness */ |
103 | case IRQ_HANDLED: | 153 | case IRQ_HANDLED: |
104 | status |= action->flags; | 154 | random |= action->flags; |
105 | break; | 155 | break; |
106 | 156 | ||
107 | default: | 157 | default: |
108 | break; | 158 | break; |
109 | } | 159 | } |
110 | 160 | ||
111 | retval |= ret; | 161 | retval |= res; |
112 | action = action->next; | 162 | action = action->next; |
113 | } while (action); | 163 | } while (action); |
114 | 164 | ||
115 | if (status & IRQF_SAMPLE_RANDOM) | 165 | if (random & IRQF_SAMPLE_RANDOM) |
116 | add_interrupt_randomness(irq); | 166 | add_interrupt_randomness(irq); |
117 | local_irq_disable(); | ||
118 | 167 | ||
168 | if (!noirqdebug) | ||
169 | note_interrupt(irq, desc, retval); | ||
119 | return retval; | 170 | return retval; |
120 | } | 171 | } |
172 | |||
173 | irqreturn_t handle_irq_event(struct irq_desc *desc) | ||
174 | { | ||
175 | struct irqaction *action = desc->action; | ||
176 | irqreturn_t ret; | ||
177 | |||
178 | irq_compat_clr_pending(desc); | ||
179 | desc->istate &= ~IRQS_PENDING; | ||
180 | irq_compat_set_progress(desc); | ||
181 | desc->istate |= IRQS_INPROGRESS; | ||
182 | raw_spin_unlock(&desc->lock); | ||
183 | |||
184 | ret = handle_irq_event_percpu(desc, action); | ||
185 | |||
186 | raw_spin_lock(&desc->lock); | ||
187 | desc->istate &= ~IRQS_INPROGRESS; | ||
188 | irq_compat_clr_progress(desc); | ||
189 | return ret; | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * handle_IRQ_event - irq action chain handler | ||
194 | * @irq: the interrupt number | ||
195 | * @action: the interrupt action chain for this irq | ||
196 | * | ||
197 | * Handles the action chain of an irq event | ||
198 | */ | ||
199 | irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) | ||
200 | { | ||
201 | return handle_irq_event_percpu(irq_to_desc(irq), action); | ||
202 | } | ||
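
The handle.c rework splits the old handle_IRQ_event() into handle_irq_event_percpu() and handle_irq_event(), and moves the thread wakeup into irq_wake_thread() with the desc->threads_oneshot accounting described in the long comment. None of this changes the driver-side contract; as a sketch under invented names (everything foo_* is hypothetical), the pattern that accounting serves is the usual threaded handler:

    #include <linux/interrupt.h>

    /* Primary handler: runs in hard irq context, must not sleep. */
    static irqreturn_t foo_primary(int irq, void *dev_id)
    {
            /* quick check/ack of the device would go here */
            return IRQ_WAKE_THREAD;         /* acted on by irq_wake_thread() above */
    }

    /* Threaded handler: may sleep, does the heavy lifting. */
    static irqreturn_t foo_thread(int irq, void *dev_id)
    {
            /* process the event, possibly over a slow bus */
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(unsigned int irq, void *dev_id)
    {
            /* IRQF_ONESHOT keeps the line masked until foo_thread() returns;
             * that is the window the per-action threads_oneshot bit covers. */
            return request_threaded_irq(irq, foo_primary, foo_thread,
                                        IRQF_ONESHOT, "foo", dev_id);
    }
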
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 4571ae7e085a..6c6ec9a49027 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -1,27 +1,101 @@ | |||
1 | /* | 1 | /* |
2 | * IRQ subsystem internal functions and variables: | 2 | * IRQ subsystem internal functions and variables: |
3 | * | ||
4 | * Do not ever include this file from anything else than | ||
5 | * kernel/irq/. Do not even think about using any information outside | ||
6 | * of this file for your non core code. | ||
3 | */ | 7 | */ |
4 | #include <linux/irqdesc.h> | 8 | #include <linux/irqdesc.h> |
5 | 9 | ||
10 | #ifdef CONFIG_SPARSE_IRQ | ||
11 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | ||
12 | #else | ||
13 | # define IRQ_BITMAP_BITS NR_IRQS | ||
14 | #endif | ||
15 | |||
16 | #define istate core_internal_state__do_not_mess_with_it | ||
17 | |||
18 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT | ||
19 | # define status status_use_accessors | ||
20 | #endif | ||
21 | |||
6 | extern int noirqdebug; | 22 | extern int noirqdebug; |
7 | 23 | ||
24 | /* | ||
25 | * Bits used by threaded handlers: | ||
26 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | ||
27 | * IRQTF_DIED - handler thread died | ||
28 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | ||
29 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | ||
30 | * IRQTF_FORCED_THREAD - irq action is force threaded | ||
31 | */ | ||
32 | enum { | ||
33 | IRQTF_RUNTHREAD, | ||
34 | IRQTF_DIED, | ||
35 | IRQTF_WARNED, | ||
36 | IRQTF_AFFINITY, | ||
37 | IRQTF_FORCED_THREAD, | ||
38 | }; | ||
39 | |||
40 | /* | ||
41 | * Bit masks for desc->state | ||
42 | * | ||
43 | * IRQS_AUTODETECT - autodetection in progress | ||
44 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt | ||
45 | * detection | ||
46 | * IRQS_POLL_INPROGRESS - polling in progress | ||
47 | * IRQS_INPROGRESS - Interrupt in progress | ||
48 | * IRQS_ONESHOT - irq is not unmasked in primary handler | ||
49 | * IRQS_REPLAY - irq is replayed | ||
50 | * IRQS_WAITING - irq is waiting | ||
51 | * IRQS_DISABLED - irq is disabled | ||
52 | * IRQS_PENDING - irq is pending and replayed later | ||
53 | * IRQS_MASKED - irq is masked | ||
54 | * IRQS_SUSPENDED - irq is suspended | ||
55 | */ | ||
56 | enum { | ||
57 | IRQS_AUTODETECT = 0x00000001, | ||
58 | IRQS_SPURIOUS_DISABLED = 0x00000002, | ||
59 | IRQS_POLL_INPROGRESS = 0x00000008, | ||
60 | IRQS_INPROGRESS = 0x00000010, | ||
61 | IRQS_ONESHOT = 0x00000020, | ||
62 | IRQS_REPLAY = 0x00000040, | ||
63 | IRQS_WAITING = 0x00000080, | ||
64 | IRQS_DISABLED = 0x00000100, | ||
65 | IRQS_PENDING = 0x00000200, | ||
66 | IRQS_MASKED = 0x00000400, | ||
67 | IRQS_SUSPENDED = 0x00000800, | ||
68 | }; | ||
69 | |||
70 | #include "compat.h" | ||
71 | #include "debug.h" | ||
72 | #include "settings.h" | ||
73 | |||
8 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 74 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
9 | 75 | ||
10 | /* Set default functions for irq_chip structures: */ | 76 | /* Set default functions for irq_chip structures: */ |
11 | extern void irq_chip_set_defaults(struct irq_chip *chip); | 77 | extern void irq_chip_set_defaults(struct irq_chip *chip); |
12 | 78 | ||
13 | /* Set default handler: */ | ||
14 | extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); | ||
15 | |||
16 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 79 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
17 | unsigned long flags); | 80 | unsigned long flags); |
18 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 81 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); |
19 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); | 82 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); |
20 | 83 | ||
84 | extern int irq_startup(struct irq_desc *desc); | ||
85 | extern void irq_shutdown(struct irq_desc *desc); | ||
86 | extern void irq_enable(struct irq_desc *desc); | ||
87 | extern void irq_disable(struct irq_desc *desc); | ||
88 | extern void mask_irq(struct irq_desc *desc); | ||
89 | extern void unmask_irq(struct irq_desc *desc); | ||
90 | |||
21 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 91 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
22 | 92 | ||
93 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); | ||
94 | irqreturn_t handle_irq_event(struct irq_desc *desc); | ||
95 | |||
23 | /* Resending of interrupts :*/ | 96 | /* Resending of interrupts :*/ |
24 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | 97 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
98 | bool irq_wait_for_poll(struct irq_desc *desc); | ||
25 | 99 | ||
26 | #ifdef CONFIG_PROC_FS | 100 | #ifdef CONFIG_PROC_FS |
27 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 101 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
@@ -37,20 +111,10 @@ static inline void unregister_handler_proc(unsigned int irq, | |||
37 | struct irqaction *action) { } | 111 | struct irqaction *action) { } |
38 | #endif | 112 | #endif |
39 | 113 | ||
40 | extern int irq_select_affinity_usr(unsigned int irq); | 114 | extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); |
41 | 115 | ||
42 | extern void irq_set_thread_affinity(struct irq_desc *desc); | 116 | extern void irq_set_thread_affinity(struct irq_desc *desc); |
43 | 117 | ||
44 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
45 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) | ||
46 | { | ||
47 | if (desc->irq_data.chip && desc->irq_data.chip->end) | ||
48 | desc->irq_data.chip->end(irq); | ||
49 | } | ||
50 | #else | ||
51 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } | ||
52 | #endif | ||
53 | |||
54 | /* Inline functions for support of irq chips on slow busses */ | 118 | /* Inline functions for support of irq chips on slow busses */ |
55 | static inline void chip_bus_lock(struct irq_desc *desc) | 119 | static inline void chip_bus_lock(struct irq_desc *desc) |
56 | { | 120 | { |
@@ -64,43 +128,60 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc) | |||
64 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); | 128 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); |
65 | } | 129 | } |
66 | 130 | ||
131 | struct irq_desc * | ||
132 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus); | ||
133 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); | ||
134 | |||
135 | static inline struct irq_desc * | ||
136 | irq_get_desc_buslock(unsigned int irq, unsigned long *flags) | ||
137 | { | ||
138 | return __irq_get_desc_lock(irq, flags, true); | ||
139 | } | ||
140 | |||
141 | static inline void | ||
142 | irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) | ||
143 | { | ||
144 | __irq_put_desc_unlock(desc, flags, true); | ||
145 | } | ||
146 | |||
147 | static inline struct irq_desc * | ||
148 | irq_get_desc_lock(unsigned int irq, unsigned long *flags) | ||
149 | { | ||
150 | return __irq_get_desc_lock(irq, flags, false); | ||
151 | } | ||
152 | |||
153 | static inline void | ||
154 | irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) | ||
155 | { | ||
156 | __irq_put_desc_unlock(desc, flags, false); | ||
157 | } | ||
158 | |||
67 | /* | 159 | /* |
68 | * Debugging printout: | 160 | * Manipulation functions for irq_data.state |
69 | */ | 161 | */ |
162 | static inline void irqd_set_move_pending(struct irq_data *d) | ||
163 | { | ||
164 | d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; | ||
165 | irq_compat_set_move_pending(irq_data_to_desc(d)); | ||
166 | } | ||
70 | 167 | ||
71 | #include <linux/kallsyms.h> | 168 | static inline void irqd_clr_move_pending(struct irq_data *d) |
72 | 169 | { | |
73 | #define P(f) if (desc->status & f) printk("%14s set\n", #f) | 170 | d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; |
171 | irq_compat_clr_move_pending(irq_data_to_desc(d)); | ||
172 | } | ||
74 | 173 | ||
75 | static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) | 174 | static inline void irqd_clear(struct irq_data *d, unsigned int mask) |
76 | { | 175 | { |
77 | printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", | 176 | d->state_use_accessors &= ~mask; |
78 | irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); | ||
79 | printk("->handle_irq(): %p, ", desc->handle_irq); | ||
80 | print_symbol("%s\n", (unsigned long)desc->handle_irq); | ||
81 | printk("->irq_data.chip(): %p, ", desc->irq_data.chip); | ||
82 | print_symbol("%s\n", (unsigned long)desc->irq_data.chip); | ||
83 | printk("->action(): %p\n", desc->action); | ||
84 | if (desc->action) { | ||
85 | printk("->action->handler(): %p, ", desc->action->handler); | ||
86 | print_symbol("%s\n", (unsigned long)desc->action->handler); | ||
87 | } | ||
88 | |||
89 | P(IRQ_INPROGRESS); | ||
90 | P(IRQ_DISABLED); | ||
91 | P(IRQ_PENDING); | ||
92 | P(IRQ_REPLAY); | ||
93 | P(IRQ_AUTODETECT); | ||
94 | P(IRQ_WAITING); | ||
95 | P(IRQ_LEVEL); | ||
96 | P(IRQ_MASKED); | ||
97 | #ifdef CONFIG_IRQ_PER_CPU | ||
98 | P(IRQ_PER_CPU); | ||
99 | #endif | ||
100 | P(IRQ_NOPROBE); | ||
101 | P(IRQ_NOREQUEST); | ||
102 | P(IRQ_NOAUTOEN); | ||
103 | } | 177 | } |
104 | 178 | ||
105 | #undef P | 179 | static inline void irqd_set(struct irq_data *d, unsigned int mask) |
180 | { | ||
181 | d->state_use_accessors |= mask; | ||
182 | } | ||
106 | 183 | ||
184 | static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) | ||
185 | { | ||
186 | return d->state_use_accessors & mask; | ||
187 | } | ||
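
internals.h now also declares the descriptor lock helpers that irqdesc.c defines below; the intended pattern for any core-code accessor is the one the chip.c conversions follow. A compressed sketch (the irq_set_foo() name is made up, the structure is what the patch establishes):

    int irq_set_foo(unsigned int irq, void *foo)
    {
            unsigned long flags;
            struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

            if (!desc)
                    return -EINVAL;
            /* ... modify the descriptor under desc->lock ... */
            irq_put_desc_unlock(desc, flags);
            return 0;
    }

Accessors that additionally need the slow-bus chip lock use irq_get_desc_buslock()/irq_put_desc_busunlock(), which wrap the same helpers with bus == true.
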
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 282f20230e67..dbccc799407f 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -79,7 +79,8 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | |||
79 | desc->irq_data.chip_data = NULL; | 79 | desc->irq_data.chip_data = NULL; |
80 | desc->irq_data.handler_data = NULL; | 80 | desc->irq_data.handler_data = NULL; |
81 | desc->irq_data.msi_desc = NULL; | 81 | desc->irq_data.msi_desc = NULL; |
82 | desc->status = IRQ_DEFAULT_INIT_FLAGS; | 82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); |
83 | desc->istate = IRQS_DISABLED; | ||
83 | desc->handle_irq = handle_bad_irq; | 84 | desc->handle_irq = handle_bad_irq; |
84 | desc->depth = 1; | 85 | desc->depth = 1; |
85 | desc->irq_count = 0; | 86 | desc->irq_count = 0; |
@@ -94,7 +95,7 @@ int nr_irqs = NR_IRQS; | |||
94 | EXPORT_SYMBOL_GPL(nr_irqs); | 95 | EXPORT_SYMBOL_GPL(nr_irqs); |
95 | 96 | ||
96 | static DEFINE_MUTEX(sparse_irq_lock); | 97 | static DEFINE_MUTEX(sparse_irq_lock); |
97 | static DECLARE_BITMAP(allocated_irqs, NR_IRQS); | 98 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); |
98 | 99 | ||
99 | #ifdef CONFIG_SPARSE_IRQ | 100 | #ifdef CONFIG_SPARSE_IRQ |
100 | 101 | ||
@@ -206,6 +207,14 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) | |||
206 | return NULL; | 207 | return NULL; |
207 | } | 208 | } |
208 | 209 | ||
210 | static int irq_expand_nr_irqs(unsigned int nr) | ||
211 | { | ||
212 | if (nr > IRQ_BITMAP_BITS) | ||
213 | return -ENOMEM; | ||
214 | nr_irqs = nr; | ||
215 | return 0; | ||
216 | } | ||
217 | |||
209 | int __init early_irq_init(void) | 218 | int __init early_irq_init(void) |
210 | { | 219 | { |
211 | int i, initcnt, node = first_online_node; | 220 | int i, initcnt, node = first_online_node; |
@@ -217,6 +226,15 @@ int __init early_irq_init(void) | |||
217 | initcnt = arch_probe_nr_irqs(); | 226 | initcnt = arch_probe_nr_irqs(); |
218 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); | 227 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); |
219 | 228 | ||
229 | if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) | ||
230 | nr_irqs = IRQ_BITMAP_BITS; | ||
231 | |||
232 | if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) | ||
233 | initcnt = IRQ_BITMAP_BITS; | ||
234 | |||
235 | if (initcnt > nr_irqs) | ||
236 | nr_irqs = initcnt; | ||
237 | |||
220 | for (i = 0; i < initcnt; i++) { | 238 | for (i = 0; i < initcnt; i++) { |
221 | desc = alloc_desc(i, node); | 239 | desc = alloc_desc(i, node); |
222 | set_bit(i, allocated_irqs); | 240 | set_bit(i, allocated_irqs); |
@@ -229,7 +247,7 @@ int __init early_irq_init(void) | |||
229 | 247 | ||
230 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | 248 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
231 | [0 ... NR_IRQS-1] = { | 249 | [0 ... NR_IRQS-1] = { |
232 | .status = IRQ_DEFAULT_INIT_FLAGS, | 250 | .istate = IRQS_DISABLED, |
233 | .handle_irq = handle_bad_irq, | 251 | .handle_irq = handle_bad_irq, |
234 | .depth = 1, | 252 | .depth = 1, |
235 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | 253 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), |
@@ -251,8 +269,8 @@ int __init early_irq_init(void) | |||
251 | for (i = 0; i < count; i++) { | 269 | for (i = 0; i < count; i++) { |
252 | desc[i].irq_data.irq = i; | 270 | desc[i].irq_data.irq = i; |
253 | desc[i].irq_data.chip = &no_irq_chip; | 271 | desc[i].irq_data.chip = &no_irq_chip; |
254 | /* TODO : do this allocation on-demand ... */ | ||
255 | desc[i].kstat_irqs = alloc_percpu(unsigned int); | 272 | desc[i].kstat_irqs = alloc_percpu(unsigned int); |
273 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); | ||
256 | alloc_masks(desc + i, GFP_KERNEL, node); | 274 | alloc_masks(desc + i, GFP_KERNEL, node); |
257 | desc_smp_init(desc + i, node); | 275 | desc_smp_init(desc + i, node); |
258 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 276 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
@@ -277,24 +295,14 @@ static void free_desc(unsigned int irq) | |||
277 | 295 | ||
278 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) | 296 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) |
279 | { | 297 | { |
280 | #if defined(CONFIG_KSTAT_IRQS_ONDEMAND) | ||
281 | struct irq_desc *desc; | ||
282 | unsigned int i; | ||
283 | |||
284 | for (i = 0; i < cnt; i++) { | ||
285 | desc = irq_to_desc(start + i); | ||
286 | if (desc && !desc->kstat_irqs) { | ||
287 | unsigned int __percpu *stats = alloc_percpu(unsigned int); | ||
288 | |||
289 | if (!stats) | ||
290 | return -1; | ||
291 | if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL) | ||
292 | free_percpu(stats); | ||
293 | } | ||
294 | } | ||
295 | #endif | ||
296 | return start; | 298 | return start; |
297 | } | 299 | } |
300 | |||
301 | static int irq_expand_nr_irqs(unsigned int nr) | ||
302 | { | ||
303 | return -ENOMEM; | ||
304 | } | ||
305 | |||
298 | #endif /* !CONFIG_SPARSE_IRQ */ | 306 | #endif /* !CONFIG_SPARSE_IRQ */ |
299 | 307 | ||
300 | /* Dynamic interrupt handling */ | 308 | /* Dynamic interrupt handling */ |
@@ -338,14 +346,17 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | |||
338 | 346 | ||
339 | mutex_lock(&sparse_irq_lock); | 347 | mutex_lock(&sparse_irq_lock); |
340 | 348 | ||
341 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | 349 | start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, |
350 | from, cnt, 0); | ||
342 | ret = -EEXIST; | 351 | ret = -EEXIST; |
343 | if (irq >=0 && start != irq) | 352 | if (irq >=0 && start != irq) |
344 | goto err; | 353 | goto err; |
345 | 354 | ||
346 | ret = -ENOMEM; | 355 | if (start + cnt > nr_irqs) { |
347 | if (start >= nr_irqs) | 356 | ret = irq_expand_nr_irqs(start + cnt); |
348 | goto err; | 357 | if (ret) |
358 | goto err; | ||
359 | } | ||
349 | 360 | ||
350 | bitmap_set(allocated_irqs, start, cnt); | 361 | bitmap_set(allocated_irqs, start, cnt); |
351 | mutex_unlock(&sparse_irq_lock); | 362 | mutex_unlock(&sparse_irq_lock); |
@@ -392,6 +403,26 @@ unsigned int irq_get_next_irq(unsigned int offset) | |||
392 | return find_next_bit(allocated_irqs, nr_irqs, offset); | 403 | return find_next_bit(allocated_irqs, nr_irqs, offset); |
393 | } | 404 | } |
394 | 405 | ||
406 | struct irq_desc * | ||
407 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus) | ||
408 | { | ||
409 | struct irq_desc *desc = irq_to_desc(irq); | ||
410 | |||
411 | if (desc) { | ||
412 | if (bus) | ||
413 | chip_bus_lock(desc); | ||
414 | raw_spin_lock_irqsave(&desc->lock, *flags); | ||
415 | } | ||
416 | return desc; | ||
417 | } | ||
418 | |||
419 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) | ||
420 | { | ||
421 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
422 | if (bus) | ||
423 | chip_bus_sync_unlock(desc); | ||
424 | } | ||
425 | |||
395 | /** | 426 | /** |
396 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq | 427 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq |
397 | * @irq: irq number to initialize | 428 | * @irq: irq number to initialize |
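For reference, the reworked allocator path above can be exercised from chip or arch code roughly as follows. This is only a sketch, not part of the patch; the function name is invented, and with CONFIG_SPARSE_IRQ a request beyond nr_irqs now succeeds as long as irq_expand_nr_irqs() can grow nr_irqs within IRQ_BITMAP_BITS.

#include <linux/irq.h>
#include <linux/topology.h>

/* Sketch only: allocate four consecutive descriptors, then release them. */
static int example_alloc_irq_block(void)
{
    int base;

    /* irq < 0 lets the core pick the first free range at or above 0 */
    base = irq_alloc_descs(-1, 0, 4, numa_node_id());
    if (base < 0)
        return base;        /* -EEXIST or -ENOMEM */

    /* set up chip, handler and flags for base .. base + 3 here */

    irq_free_descs(base, 4);
    return 0;
}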
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0caa59f747dd..acd599a43bfb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -17,6 +17,17 @@ | |||
17 | 17 | ||
18 | #include "internals.h" | 18 | #include "internals.h" |
19 | 19 | ||
20 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
21 | __read_mostly bool force_irqthreads; | ||
22 | |||
23 | static int __init setup_forced_irqthreads(char *arg) | ||
24 | { | ||
25 | force_irqthreads = true; | ||
26 | return 0; | ||
27 | } | ||
28 | early_param("threadirqs", setup_forced_irqthreads); | ||
29 | #endif | ||
30 | |||
20 | /** | 31 | /** |
21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
22 | * @irq: interrupt number to wait for | 33 | * @irq: interrupt number to wait for |
@@ -30,7 +41,7 @@ | |||
30 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
31 | { | 42 | { |
32 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
33 | unsigned int status; | 44 | unsigned int state; |
34 | 45 | ||
35 | if (!desc) | 46 | if (!desc) |
36 | return; | 47 | return; |
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq) | |||
42 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
43 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
44 | */ | 55 | */ |
45 | while (desc->status & IRQ_INPROGRESS) | 56 | while (desc->istate & IRQS_INPROGRESS) |
46 | cpu_relax(); | 57 | cpu_relax(); |
47 | 58 | ||
48 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
49 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
50 | status = desc->status; | 61 | state = desc->istate; |
51 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
52 | 63 | ||
53 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
54 | } while (status & IRQ_INPROGRESS); | 65 | } while (state & IRQS_INPROGRESS); |
55 | 66 | ||
56 | /* | 67 | /* |
57 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
73 | { | 84 | { |
74 | struct irq_desc *desc = irq_to_desc(irq); | 85 | struct irq_desc *desc = irq_to_desc(irq); |
75 | 86 | ||
76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || | 87 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
77 | !desc->irq_data.chip->irq_set_affinity) | 88 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
78 | return 0; | 89 | return 0; |
79 | 90 | ||
80 | return 1; | 91 | return 1; |
@@ -100,67 +111,169 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
100 | } | 111 | } |
101 | } | 112 | } |
102 | 113 | ||
114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
115 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) | ||
116 | { | ||
117 | return irq_settings_can_move_pcntxt(desc); | ||
118 | } | ||
119 | static inline bool irq_move_pending(struct irq_desc *desc) | ||
120 | { | ||
121 | return irqd_is_setaffinity_pending(&desc->irq_data); | ||
122 | } | ||
123 | static inline void | ||
124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | ||
125 | { | ||
126 | cpumask_copy(desc->pending_mask, mask); | ||
127 | } | ||
128 | static inline void | ||
129 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | ||
130 | { | ||
131 | cpumask_copy(mask, desc->pending_mask); | ||
132 | } | ||
133 | #else | ||
134 | static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } | ||
135 | static inline bool irq_move_pending(struct irq_desc *desc) { return false; } | ||
136 | static inline void | ||
137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | ||
138 | static inline void | ||
139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | ||
140 | #endif | ||
141 | |||
103 | /** | 142 | /** |
104 | * irq_set_affinity - Set the irq affinity of a given irq | 143 | * irq_set_affinity - Set the irq affinity of a given irq |
105 | * @irq: Interrupt to set affinity | 144 | * @irq: Interrupt to set affinity |
106 | * @cpumask: cpumask | 145 | * @cpumask: cpumask |
107 | * | 146 | * |
108 | */ | 147 | */ |
109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 148 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) |
110 | { | 149 | { |
111 | struct irq_desc *desc = irq_to_desc(irq); | 150 | struct irq_desc *desc = irq_to_desc(irq); |
112 | struct irq_chip *chip = desc->irq_data.chip; | 151 | struct irq_chip *chip = desc->irq_data.chip; |
113 | unsigned long flags; | 152 | unsigned long flags; |
153 | int ret = 0; | ||
114 | 154 | ||
115 | if (!chip->irq_set_affinity) | 155 | if (!chip->irq_set_affinity) |
116 | return -EINVAL; | 156 | return -EINVAL; |
117 | 157 | ||
118 | raw_spin_lock_irqsave(&desc->lock, flags); | 158 | raw_spin_lock_irqsave(&desc->lock, flags); |
119 | 159 | ||
120 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 160 | if (irq_can_move_pcntxt(desc)) { |
121 | if (desc->status & IRQ_MOVE_PCNTXT) { | 161 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
122 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | 162 | switch (ret) { |
123 | cpumask_copy(desc->irq_data.affinity, cpumask); | 163 | case IRQ_SET_MASK_OK: |
164 | cpumask_copy(desc->irq_data.affinity, mask); | ||
165 | case IRQ_SET_MASK_OK_NOCOPY: | ||
124 | irq_set_thread_affinity(desc); | 166 | irq_set_thread_affinity(desc); |
167 | ret = 0; | ||
125 | } | 168 | } |
169 | } else { | ||
170 | irqd_set_move_pending(&desc->irq_data); | ||
171 | irq_copy_pending(desc, mask); | ||
126 | } | 172 | } |
127 | else { | 173 | |
128 | desc->status |= IRQ_MOVE_PENDING; | 174 | if (desc->affinity_notify) { |
129 | cpumask_copy(desc->pending_mask, cpumask); | 175 | kref_get(&desc->affinity_notify->kref); |
130 | } | 176 | schedule_work(&desc->affinity_notify->work); |
131 | #else | ||
132 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | ||
133 | cpumask_copy(desc->irq_data.affinity, cpumask); | ||
134 | irq_set_thread_affinity(desc); | ||
135 | } | 177 | } |
136 | #endif | 178 | irq_compat_set_affinity(desc); |
137 | desc->status |= IRQ_AFFINITY_SET; | 179 | irqd_set(&desc->irq_data, IRQD_AFFINITY_SET); |
138 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 180 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
139 | return 0; | 181 | return ret; |
140 | } | 182 | } |
141 | 183 | ||
142 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 184 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
143 | { | 185 | { |
186 | unsigned long flags; | ||
187 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
188 | |||
189 | if (!desc) | ||
190 | return -EINVAL; | ||
191 | desc->affinity_hint = m; | ||
192 | irq_put_desc_unlock(desc, flags); | ||
193 | return 0; | ||
194 | } | ||
195 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | ||
196 | |||
197 | static void irq_affinity_notify(struct work_struct *work) | ||
198 | { | ||
199 | struct irq_affinity_notify *notify = | ||
200 | container_of(work, struct irq_affinity_notify, work); | ||
201 | struct irq_desc *desc = irq_to_desc(notify->irq); | ||
202 | cpumask_var_t cpumask; | ||
203 | unsigned long flags; | ||
204 | |||
205 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | ||
206 | goto out; | ||
207 | |||
208 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
209 | if (irq_move_pending(desc)) | ||
210 | irq_get_pending(cpumask, desc); | ||
211 | else | ||
212 | cpumask_copy(cpumask, desc->irq_data.affinity); | ||
213 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
214 | |||
215 | notify->notify(notify, cpumask); | ||
216 | |||
217 | free_cpumask_var(cpumask); | ||
218 | out: | ||
219 | kref_put(¬ify->kref, notify->release); | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | ||
224 | * @irq: Interrupt for which to enable/disable notification | ||
225 | * @notify: Context for notification, or %NULL to disable | ||
226 | * notification. Function pointers must be initialised; | ||
227 | * the other fields will be initialised by this function. | ||
228 | * | ||
229 | * Must be called in process context. Notification may only be enabled | ||
230 | * after the IRQ is allocated and must be disabled before the IRQ is | ||
231 | * freed using free_irq(). | ||
232 | */ | ||
233 | int | ||
234 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | ||
235 | { | ||
144 | struct irq_desc *desc = irq_to_desc(irq); | 236 | struct irq_desc *desc = irq_to_desc(irq); |
237 | struct irq_affinity_notify *old_notify; | ||
145 | unsigned long flags; | 238 | unsigned long flags; |
146 | 239 | ||
240 | /* The release function is promised process context */ | ||
241 | might_sleep(); | ||
242 | |||
147 | if (!desc) | 243 | if (!desc) |
148 | return -EINVAL; | 244 | return -EINVAL; |
149 | 245 | ||
246 | /* Complete initialisation of *notify */ | ||
247 | if (notify) { | ||
248 | notify->irq = irq; | ||
249 | kref_init(¬ify->kref); | ||
250 | INIT_WORK(¬ify->work, irq_affinity_notify); | ||
251 | } | ||
252 | |||
150 | raw_spin_lock_irqsave(&desc->lock, flags); | 253 | raw_spin_lock_irqsave(&desc->lock, flags); |
151 | desc->affinity_hint = m; | 254 | old_notify = desc->affinity_notify; |
255 | desc->affinity_notify = notify; | ||
152 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 256 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
153 | 257 | ||
258 | if (old_notify) | ||
259 | kref_put(&old_notify->kref, old_notify->release); | ||
260 | |||
154 | return 0; | 261 | return 0; |
155 | } | 262 | } |
156 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 263 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
157 | 264 | ||
158 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 265 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
159 | /* | 266 | /* |
160 | * Generic version of the affinity autoselector. | 267 | * Generic version of the affinity autoselector. |
161 | */ | 268 | */ |
162 | static int setup_affinity(unsigned int irq, struct irq_desc *desc) | 269 | static int |
270 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
163 | { | 271 | { |
272 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
273 | struct cpumask *set = irq_default_affinity; | ||
274 | int ret; | ||
275 | |||
276 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | ||
164 | if (!irq_can_set_affinity(irq)) | 277 | if (!irq_can_set_affinity(irq)) |
165 | return 0; | 278 | return 0; |
166 | 279 | ||
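A driver-side sketch of the notifier interface added above; all identifiers below are invented, only the irq_affinity_notify fields and irq_set_affinity_notifier() come from the patch. The core takes a kref per queued notification and calls ->release() from process context; the notifier must be torn down with irq_set_affinity_notifier(irq, NULL) before free_irq(), matching the WARN_ON added there.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>

struct example_queue {
    struct irq_affinity_notify notify;  /* embedded notifier */
    unsigned int irq;
};

static void example_affinity_notify(struct irq_affinity_notify *notify,
                                    const cpumask_t *mask)
{
    struct example_queue *q = container_of(notify, struct example_queue,
                                           notify);

    /* re-target per-queue resources to the new mask */
    pr_info("irq %u affinity changed\n", q->irq);
}

static void example_affinity_release(struct kref *ref)
{
    /* last reference dropped; nothing dynamic to free in this sketch */
}

static int example_register_notifier(struct example_queue *q)
{
    q->notify.notify = example_affinity_notify;
    q->notify.release = example_affinity_release;
    /* the core initialises ->irq, ->kref and ->work */
    return irq_set_affinity_notifier(q->irq, &q->notify);
}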
@@ -168,22 +281,29 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
168 | * Preserve an userspace affinity setup, but make sure that | 281 | * Preserve an userspace affinity setup, but make sure that |
169 | * one of the targets is online. | 282 | * one of the targets is online. |
170 | */ | 283 | */ |
171 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 284 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
172 | if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) | 285 | if (cpumask_intersects(desc->irq_data.affinity, |
173 | < nr_cpu_ids) | 286 | cpu_online_mask)) |
174 | goto set_affinity; | 287 | set = desc->irq_data.affinity; |
175 | else | 288 | else { |
176 | desc->status &= ~IRQ_AFFINITY_SET; | 289 | irq_compat_clr_affinity(desc); |
290 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | ||
291 | } | ||
177 | } | 292 | } |
178 | 293 | ||
179 | cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); | 294 | cpumask_and(mask, cpu_online_mask, set); |
180 | set_affinity: | 295 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
181 | desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); | 296 | switch (ret) { |
182 | 297 | case IRQ_SET_MASK_OK: | |
298 | cpumask_copy(desc->irq_data.affinity, mask); | ||
299 | case IRQ_SET_MASK_OK_NOCOPY: | ||
300 | irq_set_thread_affinity(desc); | ||
301 | } | ||
183 | return 0; | 302 | return 0; |
184 | } | 303 | } |
185 | #else | 304 | #else |
186 | static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | 305 | static inline int |
306 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | ||
187 | { | 307 | { |
188 | return irq_select_affinity(irq); | 308 | return irq_select_affinity(irq); |
189 | } | 309 | } |
@@ -192,23 +312,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | |||
192 | /* | 312 | /* |
193 | * Called when affinity is set via /proc/irq | 313 | * Called when affinity is set via /proc/irq |
194 | */ | 314 | */ |
195 | int irq_select_affinity_usr(unsigned int irq) | 315 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
196 | { | 316 | { |
197 | struct irq_desc *desc = irq_to_desc(irq); | 317 | struct irq_desc *desc = irq_to_desc(irq); |
198 | unsigned long flags; | 318 | unsigned long flags; |
199 | int ret; | 319 | int ret; |
200 | 320 | ||
201 | raw_spin_lock_irqsave(&desc->lock, flags); | 321 | raw_spin_lock_irqsave(&desc->lock, flags); |
202 | ret = setup_affinity(irq, desc); | 322 | ret = setup_affinity(irq, desc, mask); |
203 | if (!ret) | ||
204 | irq_set_thread_affinity(desc); | ||
205 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 323 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
206 | |||
207 | return ret; | 324 | return ret; |
208 | } | 325 | } |
209 | 326 | ||
210 | #else | 327 | #else |
211 | static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) | 328 | static inline int |
329 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
212 | { | 330 | { |
213 | return 0; | 331 | return 0; |
214 | } | 332 | } |
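The switch statements above rely on the return convention of the chip's irq_set_affinity() callback: IRQ_SET_MASK_OK lets the core copy the mask into irq_data.affinity, while IRQ_SET_MASK_OK_NOCOPY tells it the callback already stored an adjusted mask itself. A minimal, invented callback for illustration:

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Invented example; the routing-register write is left as a comment. */
static int example_set_affinity(struct irq_data *data,
                                const struct cpumask *dest, bool force)
{
    unsigned int cpu = cpumask_first_and(dest, cpu_online_mask);

    if (cpu >= nr_cpu_ids)
        return -EINVAL;

    /* program the hardware routing for data->irq to 'cpu' here */

    return IRQ_SET_MASK_OK; /* core copies 'dest' to data->affinity */
}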
@@ -219,13 +337,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
219 | if (suspend) { | 337 | if (suspend) { |
220 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 338 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
221 | return; | 339 | return; |
222 | desc->status |= IRQ_SUSPENDED; | 340 | desc->istate |= IRQS_SUSPENDED; |
223 | } | 341 | } |
224 | 342 | ||
225 | if (!desc->depth++) { | 343 | if (!desc->depth++) |
226 | desc->status |= IRQ_DISABLED; | 344 | irq_disable(desc); |
227 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 345 | } |
228 | } | 346 | |
347 | static int __disable_irq_nosync(unsigned int irq) | ||
348 | { | ||
349 | unsigned long flags; | ||
350 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
351 | |||
352 | if (!desc) | ||
353 | return -EINVAL; | ||
354 | __disable_irq(desc, irq, false); | ||
355 | irq_put_desc_busunlock(desc, flags); | ||
356 | return 0; | ||
229 | } | 357 | } |
230 | 358 | ||
231 | /** | 359 | /** |
@@ -241,17 +369,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
241 | */ | 369 | */ |
242 | void disable_irq_nosync(unsigned int irq) | 370 | void disable_irq_nosync(unsigned int irq) |
243 | { | 371 | { |
244 | struct irq_desc *desc = irq_to_desc(irq); | 372 | __disable_irq_nosync(irq); |
245 | unsigned long flags; | ||
246 | |||
247 | if (!desc) | ||
248 | return; | ||
249 | |||
250 | chip_bus_lock(desc); | ||
251 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
252 | __disable_irq(desc, irq, false); | ||
253 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
254 | chip_bus_sync_unlock(desc); | ||
255 | } | 373 | } |
256 | EXPORT_SYMBOL(disable_irq_nosync); | 374 | EXPORT_SYMBOL(disable_irq_nosync); |
257 | 375 | ||
@@ -269,21 +387,24 @@ EXPORT_SYMBOL(disable_irq_nosync); | |||
269 | */ | 387 | */ |
270 | void disable_irq(unsigned int irq) | 388 | void disable_irq(unsigned int irq) |
271 | { | 389 | { |
272 | struct irq_desc *desc = irq_to_desc(irq); | 390 | if (!__disable_irq_nosync(irq)) |
273 | |||
274 | if (!desc) | ||
275 | return; | ||
276 | |||
277 | disable_irq_nosync(irq); | ||
278 | if (desc->action) | ||
279 | synchronize_irq(irq); | 391 | synchronize_irq(irq); |
280 | } | 392 | } |
281 | EXPORT_SYMBOL(disable_irq); | 393 | EXPORT_SYMBOL(disable_irq); |
282 | 394 | ||
283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 395 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
284 | { | 396 | { |
285 | if (resume) | 397 | if (resume) { |
286 | desc->status &= ~IRQ_SUSPENDED; | 398 | if (!(desc->istate & IRQS_SUSPENDED)) { |
399 | if (!desc->action) | ||
400 | return; | ||
401 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
402 | return; | ||
403 | /* Pretend that it got disabled ! */ | ||
404 | desc->depth++; | ||
405 | } | ||
406 | desc->istate &= ~IRQS_SUSPENDED; | ||
407 | } | ||
287 | 408 | ||
288 | switch (desc->depth) { | 409 | switch (desc->depth) { |
289 | case 0: | 410 | case 0: |
@@ -291,12 +412,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
291 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 412 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
292 | break; | 413 | break; |
293 | case 1: { | 414 | case 1: { |
294 | unsigned int status = desc->status & ~IRQ_DISABLED; | 415 | if (desc->istate & IRQS_SUSPENDED) |
295 | |||
296 | if (desc->status & IRQ_SUSPENDED) | ||
297 | goto err_out; | 416 | goto err_out; |
298 | /* Prevent probing on this irq: */ | 417 | /* Prevent probing on this irq: */ |
299 | desc->status = status | IRQ_NOPROBE; | 418 | irq_settings_set_noprobe(desc); |
419 | irq_enable(desc); | ||
300 | check_irq_resend(desc, irq); | 420 | check_irq_resend(desc, irq); |
301 | /* fall-through */ | 421 | /* fall-through */ |
302 | } | 422 | } |
@@ -318,21 +438,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
318 | */ | 438 | */ |
319 | void enable_irq(unsigned int irq) | 439 | void enable_irq(unsigned int irq) |
320 | { | 440 | { |
321 | struct irq_desc *desc = irq_to_desc(irq); | ||
322 | unsigned long flags; | 441 | unsigned long flags; |
442 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
323 | 443 | ||
324 | if (!desc) | 444 | if (!desc) |
325 | return; | 445 | return; |
446 | if (WARN(!desc->irq_data.chip, | ||
447 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
448 | goto out; | ||
326 | 449 | ||
327 | if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable, | ||
328 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
329 | return; | ||
330 | |||
331 | chip_bus_lock(desc); | ||
332 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
333 | __enable_irq(desc, irq, false); | 450 | __enable_irq(desc, irq, false); |
334 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 451 | out: |
335 | chip_bus_sync_unlock(desc); | 452 | irq_put_desc_busunlock(desc, flags); |
336 | } | 453 | } |
337 | EXPORT_SYMBOL(enable_irq); | 454 | EXPORT_SYMBOL(enable_irq); |
338 | 455 | ||
@@ -348,7 +465,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
348 | } | 465 | } |
349 | 466 | ||
350 | /** | 467 | /** |
351 | * set_irq_wake - control irq power management wakeup | 468 | * irq_set_irq_wake - control irq power management wakeup |
352 | * @irq: interrupt to control | 469 | * @irq: interrupt to control |
353 | * @on: enable/disable power management wakeup | 470 | * @on: enable/disable power management wakeup |
354 | * | 471 | * |
@@ -359,23 +476,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
359 | * Wakeup mode lets this IRQ wake the system from sleep | 476 | * Wakeup mode lets this IRQ wake the system from sleep |
360 | * states like "suspend to RAM". | 477 | * states like "suspend to RAM". |
361 | */ | 478 | */ |
362 | int set_irq_wake(unsigned int irq, unsigned int on) | 479 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
363 | { | 480 | { |
364 | struct irq_desc *desc = irq_to_desc(irq); | ||
365 | unsigned long flags; | 481 | unsigned long flags; |
482 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
366 | int ret = 0; | 483 | int ret = 0; |
367 | 484 | ||
368 | /* wakeup-capable irqs can be shared between drivers that | 485 | /* wakeup-capable irqs can be shared between drivers that |
369 | * don't need to have the same sleep mode behaviors. | 486 | * don't need to have the same sleep mode behaviors. |
370 | */ | 487 | */ |
371 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
372 | if (on) { | 488 | if (on) { |
373 | if (desc->wake_depth++ == 0) { | 489 | if (desc->wake_depth++ == 0) { |
374 | ret = set_irq_wake_real(irq, on); | 490 | ret = set_irq_wake_real(irq, on); |
375 | if (ret) | 491 | if (ret) |
376 | desc->wake_depth = 0; | 492 | desc->wake_depth = 0; |
377 | else | 493 | else |
378 | desc->status |= IRQ_WAKEUP; | 494 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
379 | } | 495 | } |
380 | } else { | 496 | } else { |
381 | if (desc->wake_depth == 0) { | 497 | if (desc->wake_depth == 0) { |
@@ -385,14 +501,13 @@ int set_irq_wake(unsigned int irq, unsigned int on) | |||
385 | if (ret) | 501 | if (ret) |
386 | desc->wake_depth = 1; | 502 | desc->wake_depth = 1; |
387 | else | 503 | else |
388 | desc->status &= ~IRQ_WAKEUP; | 504 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
389 | } | 505 | } |
390 | } | 506 | } |
391 | 507 | irq_put_desc_busunlock(desc, flags); | |
392 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
393 | return ret; | 508 | return ret; |
394 | } | 509 | } |
395 | EXPORT_SYMBOL(set_irq_wake); | 510 | EXPORT_SYMBOL(irq_set_irq_wake); |
396 | 511 | ||
397 | /* | 512 | /* |
398 | * Internal function that tells the architecture code whether a | 513 | * Internal function that tells the architecture code whether a |
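Driver usage of the renamed interface is unchanged: the usual entry points are the enable_irq_wake()/disable_irq_wake() wrappers around irq_set_irq_wake(), and wake_depth keeps nested calls balanced. A rough sketch with an invented device:

#include <linux/interrupt.h>

static unsigned int example_wake_irq;   /* invented; set at probe time */

static int example_suspend(void)
{
    /* first caller arms the line as a system wakeup source */
    return enable_irq_wake(example_wake_irq);
}

static int example_resume(void)
{
    /* last caller disarms it; unbalanced calls trigger a warning */
    return disable_irq_wake(example_wake_irq);
}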
@@ -401,43 +516,27 @@ EXPORT_SYMBOL(set_irq_wake); | |||
401 | */ | 516 | */ |
402 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 517 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
403 | { | 518 | { |
404 | struct irq_desc *desc = irq_to_desc(irq); | ||
405 | struct irqaction *action; | ||
406 | unsigned long flags; | 519 | unsigned long flags; |
520 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
521 | int canrequest = 0; | ||
407 | 522 | ||
408 | if (!desc) | 523 | if (!desc) |
409 | return 0; | 524 | return 0; |
410 | 525 | ||
411 | if (desc->status & IRQ_NOREQUEST) | 526 | if (irq_settings_can_request(desc)) { |
412 | return 0; | 527 | if (desc->action) |
413 | 528 | if (irqflags & desc->action->flags & IRQF_SHARED) | |
414 | raw_spin_lock_irqsave(&desc->lock, flags); | 529 | canrequest = 1; |

415 | action = desc->action; | 530 | } |
416 | if (action) | 531 | irq_put_desc_unlock(desc, flags); |
417 | if (irqflags & action->flags & IRQF_SHARED) | 532 | return canrequest; |
418 | action = NULL; | ||
419 | |||
420 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
421 | |||
422 | return !action; | ||
423 | } | ||
424 | |||
425 | void compat_irq_chip_set_default_handler(struct irq_desc *desc) | ||
426 | { | ||
427 | /* | ||
428 | * If the architecture still has not overriden | ||
429 | * the flow handler then zap the default. This | ||
430 | * should catch incorrect flow-type setting. | ||
431 | */ | ||
432 | if (desc->handle_irq == &handle_bad_irq) | ||
433 | desc->handle_irq = NULL; | ||
434 | } | 533 | } |
435 | 534 | ||
436 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 535 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
437 | unsigned long flags) | 536 | unsigned long flags) |
438 | { | 537 | { |
439 | int ret; | ||
440 | struct irq_chip *chip = desc->irq_data.chip; | 538 | struct irq_chip *chip = desc->irq_data.chip; |
539 | int ret, unmask = 0; | ||
441 | 540 | ||
442 | if (!chip || !chip->irq_set_type) { | 541 | if (!chip || !chip->irq_set_type) { |
443 | /* | 542 | /* |
@@ -449,23 +548,43 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
449 | return 0; | 548 | return 0; |
450 | } | 549 | } |
451 | 550 | ||
551 | flags &= IRQ_TYPE_SENSE_MASK; | ||
552 | |||
553 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | ||
554 | if (!(desc->istate & IRQS_MASKED)) | ||
555 | mask_irq(desc); | ||
556 | if (!(desc->istate & IRQS_DISABLED)) | ||
557 | unmask = 1; | ||
558 | } | ||
559 | |||
452 | /* caller masked out all except trigger mode flags */ | 560 | /* caller masked out all except trigger mode flags */ |
453 | ret = chip->irq_set_type(&desc->irq_data, flags); | 561 | ret = chip->irq_set_type(&desc->irq_data, flags); |
454 | 562 | ||
455 | if (ret) | 563 | switch (ret) { |
456 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | 564 | case IRQ_SET_MASK_OK: |
457 | flags, irq, chip->irq_set_type); | 565 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
458 | else { | 566 | irqd_set(&desc->irq_data, flags); |
459 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 567 | |
460 | flags |= IRQ_LEVEL; | 568 | case IRQ_SET_MASK_OK_NOCOPY: |
461 | /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ | 569 | flags = irqd_get_trigger_type(&desc->irq_data); |
462 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | 570 | irq_settings_set_trigger_mask(desc, flags); |
463 | desc->status |= flags; | 571 | irqd_clear(&desc->irq_data, IRQD_LEVEL); |
572 | irq_settings_clr_level(desc); | ||
573 | if (flags & IRQ_TYPE_LEVEL_MASK) { | ||
574 | irq_settings_set_level(desc); | ||
575 | irqd_set(&desc->irq_data, IRQD_LEVEL); | ||
576 | } | ||
464 | 577 | ||
465 | if (chip != desc->irq_data.chip) | 578 | if (chip != desc->irq_data.chip) |
466 | irq_chip_set_defaults(desc->irq_data.chip); | 579 | irq_chip_set_defaults(desc->irq_data.chip); |
580 | ret = 0; | ||
581 | break; | ||
582 | default: | ||
583 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | ||
584 | flags, irq, chip->irq_set_type); | ||
467 | } | 585 | } |
468 | 586 | if (unmask) | |
587 | unmask_irq(desc); | ||
469 | return ret; | 588 | return ret; |
470 | } | 589 | } |
471 | 590 | ||
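The mask/unmask handling above is driven by the new IRQCHIP_SET_TYPE_MASKED chip flag: a chip that needs the line quiet while the trigger is reprogrammed only has to set the flag. A stub chip for illustration (everything here is invented):

#include <linux/irq.h>

static void example_mask(struct irq_data *d)   { /* mask in hardware */ }
static void example_unmask(struct irq_data *d) { /* unmask in hardware */ }

static int example_set_type(struct irq_data *d, unsigned int flow_type)
{
    /* reprogram edge/level sense for d->irq here */
    return IRQ_SET_MASK_OK;
}

static struct irq_chip example_chip = {
    .name           = "example",
    .irq_mask       = example_mask,
    .irq_unmask     = example_unmask,
    .irq_set_type   = example_set_type,
    /* core masks the line around irq_set_type() */
    .flags          = IRQCHIP_SET_TYPE_MASKED,
};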
@@ -509,8 +628,11 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
509 | * handler finished. unmask if the interrupt has not been disabled and | 628 | * handler finished. unmask if the interrupt has not been disabled and |
510 | * is marked MASKED. | 629 | * is marked MASKED. |
511 | */ | 630 | */ |
512 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | 631 | static void irq_finalize_oneshot(struct irq_desc *desc, |
632 | struct irqaction *action, bool force) | ||
513 | { | 633 | { |
634 | if (!(desc->istate & IRQS_ONESHOT)) | ||
635 | return; | ||
514 | again: | 636 | again: |
515 | chip_bus_lock(desc); | 637 | chip_bus_lock(desc); |
516 | raw_spin_lock_irq(&desc->lock); | 638 | raw_spin_lock_irq(&desc->lock); |
@@ -522,26 +644,44 @@ again: | |||
522 | * The thread is faster done than the hard interrupt handler | 644 | * The thread is faster done than the hard interrupt handler |
523 | * on the other CPU. If we unmask the irq line then the | 645 | * on the other CPU. If we unmask the irq line then the |
524 | * interrupt can come in again and masks the line, leaves due | 646 | * interrupt can come in again and masks the line, leaves due |
525 | * to IRQ_INPROGRESS and the irq line is masked forever. | 647 | * to IRQS_INPROGRESS and the irq line is masked forever. |
648 | * | ||
649 | * This also serializes the state of shared oneshot handlers | ||
650 | * versus "desc->threads_oneshot |= action->thread_mask;" in | ||
651 | * irq_wake_thread(). See the comment there which explains the | ||
652 | * serialization. | ||
526 | */ | 653 | */ |
527 | if (unlikely(desc->status & IRQ_INPROGRESS)) { | 654 | if (unlikely(desc->istate & IRQS_INPROGRESS)) { |
528 | raw_spin_unlock_irq(&desc->lock); | 655 | raw_spin_unlock_irq(&desc->lock); |
529 | chip_bus_sync_unlock(desc); | 656 | chip_bus_sync_unlock(desc); |
530 | cpu_relax(); | 657 | cpu_relax(); |
531 | goto again; | 658 | goto again; |
532 | } | 659 | } |
533 | 660 | ||
534 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 661 | /* |
535 | desc->status &= ~IRQ_MASKED; | 662 | * Now check again, whether the thread should run. Otherwise |
663 | * we would clear the threads_oneshot bit of this thread which | ||
664 | * was just set. | ||
665 | */ | ||
666 | if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
667 | goto out_unlock; | ||
668 | |||
669 | desc->threads_oneshot &= ~action->thread_mask; | ||
670 | |||
671 | if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) && | ||
672 | (desc->istate & IRQS_MASKED)) { | ||
673 | irq_compat_clr_masked(desc); | ||
674 | desc->istate &= ~IRQS_MASKED; | ||
536 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 675 | desc->irq_data.chip->irq_unmask(&desc->irq_data); |
537 | } | 676 | } |
677 | out_unlock: | ||
538 | raw_spin_unlock_irq(&desc->lock); | 678 | raw_spin_unlock_irq(&desc->lock); |
539 | chip_bus_sync_unlock(desc); | 679 | chip_bus_sync_unlock(desc); |
540 | } | 680 | } |
541 | 681 | ||
542 | #ifdef CONFIG_SMP | 682 | #ifdef CONFIG_SMP |
543 | /* | 683 | /* |
544 | * Check whether we need to change the affinity of the interrupt thread. | 684 | * Check whether we need to change the affinity of the interrupt thread. |
545 | */ | 685 | */ |
546 | static void | 686 | static void |
547 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 687 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
@@ -573,6 +713,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |||
573 | #endif | 713 | #endif |
574 | 714 | ||
575 | /* | 715 | /* |
716 | * Interrupts which are not explicitly requested as threaded | ||
717 | * interrupts rely on the implicit bh/preempt disable of the hard irq | ||
718 | * context. So we need to disable bh here to avoid deadlocks and other | ||
719 | * side effects. | ||
720 | */ | ||
721 | static void | ||
722 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
723 | { | ||
724 | local_bh_disable(); | ||
725 | action->thread_fn(action->irq, action->dev_id); | ||
726 | irq_finalize_oneshot(desc, action, false); | ||
727 | local_bh_enable(); | ||
728 | } | ||
729 | |||
730 | /* | ||
731 | * Interrupts explicitly requested as threaded interrupts want to be | ||
732 | * preemptible - many of them need to sleep and wait for slow buses to | ||
733 | * complete. | ||
734 | */ | ||
735 | static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
736 | { | ||
737 | action->thread_fn(action->irq, action->dev_id); | ||
738 | irq_finalize_oneshot(desc, action, false); | ||
739 | } | ||
740 | |||
741 | /* | ||
576 | * Interrupt handler thread | 742 | * Interrupt handler thread |
577 | */ | 743 | */ |
578 | static int irq_thread(void *data) | 744 | static int irq_thread(void *data) |
@@ -582,7 +748,14 @@ static int irq_thread(void *data) | |||
582 | }; | 748 | }; |
583 | struct irqaction *action = data; | 749 | struct irqaction *action = data; |
584 | struct irq_desc *desc = irq_to_desc(action->irq); | 750 | struct irq_desc *desc = irq_to_desc(action->irq); |
585 | int wake, oneshot = desc->status & IRQ_ONESHOT; | 751 | void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); |
752 | int wake; | ||
753 | |||
754 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, | ||
755 | &action->thread_flags)) | ||
756 | handler_fn = irq_forced_thread_fn; | ||
757 | else | ||
758 | handler_fn = irq_thread_fn; | ||
586 | 759 | ||
587 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 760 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
588 | current->irqaction = action; | 761 | current->irqaction = action; |
@@ -594,23 +767,20 @@ static int irq_thread(void *data) | |||
594 | atomic_inc(&desc->threads_active); | 767 | atomic_inc(&desc->threads_active); |
595 | 768 | ||
596 | raw_spin_lock_irq(&desc->lock); | 769 | raw_spin_lock_irq(&desc->lock); |
597 | if (unlikely(desc->status & IRQ_DISABLED)) { | 770 | if (unlikely(desc->istate & IRQS_DISABLED)) { |
598 | /* | 771 | /* |
599 | * CHECKME: We might need a dedicated | 772 | * CHECKME: We might need a dedicated |
600 | * IRQ_THREAD_PENDING flag here, which | 773 | * IRQ_THREAD_PENDING flag here, which |
601 | * retriggers the thread in check_irq_resend() | 774 | * retriggers the thread in check_irq_resend() |
602 | * but AFAICT IRQ_PENDING should be fine as it | 775 | * but AFAICT IRQS_PENDING should be fine as it |
603 | * retriggers the interrupt itself --- tglx | 776 | * retriggers the interrupt itself --- tglx |
604 | */ | 777 | */ |
605 | desc->status |= IRQ_PENDING; | 778 | irq_compat_set_pending(desc); |
779 | desc->istate |= IRQS_PENDING; | ||
606 | raw_spin_unlock_irq(&desc->lock); | 780 | raw_spin_unlock_irq(&desc->lock); |
607 | } else { | 781 | } else { |
608 | raw_spin_unlock_irq(&desc->lock); | 782 | raw_spin_unlock_irq(&desc->lock); |
609 | 783 | handler_fn(desc, action); | |
610 | action->thread_fn(action->irq, action->dev_id); | ||
611 | |||
612 | if (oneshot) | ||
613 | irq_finalize_oneshot(action->irq, desc); | ||
614 | } | 784 | } |
615 | 785 | ||
616 | wake = atomic_dec_and_test(&desc->threads_active); | 786 | wake = atomic_dec_and_test(&desc->threads_active); |
@@ -619,6 +789,9 @@ static int irq_thread(void *data) | |||
619 | wake_up(&desc->wait_for_threads); | 789 | wake_up(&desc->wait_for_threads); |
620 | } | 790 | } |
621 | 791 | ||
792 | /* Prevent a stale desc->threads_oneshot */ | ||
793 | irq_finalize_oneshot(desc, action, true); | ||
794 | |||
622 | /* | 795 | /* |
623 | * Clear irqaction. Otherwise exit_irq_thread() would make | 796 | * Clear irqaction. Otherwise exit_irq_thread() would make |
624 | * fuzz about an active irq thread going into nirvana. | 797 | * fuzz about an active irq thread going into nirvana. |
@@ -633,6 +806,7 @@ static int irq_thread(void *data) | |||
633 | void exit_irq_thread(void) | 806 | void exit_irq_thread(void) |
634 | { | 807 | { |
635 | struct task_struct *tsk = current; | 808 | struct task_struct *tsk = current; |
809 | struct irq_desc *desc; | ||
636 | 810 | ||
637 | if (!tsk->irqaction) | 811 | if (!tsk->irqaction) |
638 | return; | 812 | return; |
@@ -641,6 +815,14 @@ void exit_irq_thread(void) | |||
641 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 815 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
642 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | 816 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); |
643 | 817 | ||
818 | desc = irq_to_desc(tsk->irqaction->irq); | ||
819 | |||
820 | /* | ||
821 | * Prevent a stale desc->threads_oneshot. Must be called | ||
822 | * before setting the IRQTF_DIED flag. | ||
823 | */ | ||
824 | irq_finalize_oneshot(desc, tsk->irqaction, true); | ||
825 | |||
644 | /* | 826 | /* |
645 | * Set the THREAD DIED flag to prevent further wakeups of the | 827 | * Set the THREAD DIED flag to prevent further wakeups of the |
646 | * soon to be gone threaded handler. | 828 | * soon to be gone threaded handler. |
@@ -648,6 +830,22 @@ void exit_irq_thread(void) | |||
648 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | 830 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); |
649 | } | 831 | } |
650 | 832 | ||
833 | static void irq_setup_forced_threading(struct irqaction *new) | ||
834 | { | ||
835 | if (!force_irqthreads) | ||
836 | return; | ||
837 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | ||
838 | return; | ||
839 | |||
840 | new->flags |= IRQF_ONESHOT; | ||
841 | |||
842 | if (!new->thread_fn) { | ||
843 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | ||
844 | new->thread_fn = new->handler; | ||
845 | new->handler = irq_default_primary_handler; | ||
846 | } | ||
847 | } | ||
848 | |||
651 | /* | 849 | /* |
652 | * Internal function to register an irqaction - typically used to | 850 | * Internal function to register an irqaction - typically used to |
653 | * allocate special interrupts that are part of the architecture. | 851 | * allocate special interrupts that are part of the architecture. |
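From a driver's point of view forced threading is transparent: when threadirqs is on the command line, the primary handler passed to request_irq() is moved to thread_fn and replaced by irq_default_primary_handler, with IRQF_ONESHOT implied. Handlers that must stay in hard interrupt context opt out with the new IRQF_NO_THREAD flag, roughly like this (device specifics invented):

#include <linux/interrupt.h>

static irqreturn_t example_timer_irq(int irq, void *dev_id)
{
    /* must run in hard interrupt context even with threadirqs */
    return IRQ_HANDLED;
}

static int example_setup_timer_irq(unsigned int irq, void *dev)
{
    return request_irq(irq, example_timer_irq,
                       IRQF_TIMER | IRQF_NO_THREAD, "example-timer", dev);
}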
@@ -657,9 +855,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
657 | { | 855 | { |
658 | struct irqaction *old, **old_ptr; | 856 | struct irqaction *old, **old_ptr; |
659 | const char *old_name = NULL; | 857 | const char *old_name = NULL; |
660 | unsigned long flags; | 858 | unsigned long flags, thread_mask = 0; |
661 | int nested, shared = 0; | 859 | int ret, nested, shared = 0; |
662 | int ret; | 860 | cpumask_var_t mask; |
663 | 861 | ||
664 | if (!desc) | 862 | if (!desc) |
665 | return -EINVAL; | 863 | return -EINVAL; |
@@ -683,15 +881,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
683 | rand_initialize_irq(irq); | 881 | rand_initialize_irq(irq); |
684 | } | 882 | } |
685 | 883 | ||
686 | /* Oneshot interrupts are not allowed with shared */ | ||
687 | if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) | ||
688 | return -EINVAL; | ||
689 | |||
690 | /* | 884 | /* |
691 | * Check whether the interrupt nests into another interrupt | 885 | * Check whether the interrupt nests into another interrupt |
692 | * thread. | 886 | * thread. |
693 | */ | 887 | */ |
694 | nested = desc->status & IRQ_NESTED_THREAD; | 888 | nested = irq_settings_is_nested_thread(desc); |
695 | if (nested) { | 889 | if (nested) { |
696 | if (!new->thread_fn) | 890 | if (!new->thread_fn) |
697 | return -EINVAL; | 891 | return -EINVAL; |
@@ -701,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
701 | * dummy function which warns when called. | 895 | * dummy function which warns when called. |
702 | */ | 896 | */ |
703 | new->handler = irq_nested_primary_handler; | 897 | new->handler = irq_nested_primary_handler; |
898 | } else { | ||
899 | irq_setup_forced_threading(new); | ||
704 | } | 900 | } |
705 | 901 | ||
706 | /* | 902 | /* |
@@ -724,6 +920,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
724 | new->thread = t; | 920 | new->thread = t; |
725 | } | 921 | } |
726 | 922 | ||
923 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
924 | ret = -ENOMEM; | ||
925 | goto out_thread; | ||
926 | } | ||
927 | |||
727 | /* | 928 | /* |
728 | * The following block of code has to be executed atomically | 929 | * The following block of code has to be executed atomically |
729 | */ | 930 | */ |
@@ -735,29 +936,40 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
735 | * Can't share interrupts unless both agree to and are | 936 | * Can't share interrupts unless both agree to and are |
736 | * the same type (level, edge, polarity). So both flag | 937 | * the same type (level, edge, polarity). So both flag |
737 | * fields must have IRQF_SHARED set and the bits which | 938 | * fields must have IRQF_SHARED set and the bits which |
738 | * set the trigger type must match. | 939 | * set the trigger type must match. Also all must |
940 | * agree on ONESHOT. | ||
739 | */ | 941 | */ |
740 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 942 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
741 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { | 943 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
944 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) { | ||
742 | old_name = old->name; | 945 | old_name = old->name; |
743 | goto mismatch; | 946 | goto mismatch; |
744 | } | 947 | } |
745 | 948 | ||
746 | #if defined(CONFIG_IRQ_PER_CPU) | ||
747 | /* All handlers must agree on per-cpuness */ | 949 | /* All handlers must agree on per-cpuness */ |
748 | if ((old->flags & IRQF_PERCPU) != | 950 | if ((old->flags & IRQF_PERCPU) != |
749 | (new->flags & IRQF_PERCPU)) | 951 | (new->flags & IRQF_PERCPU)) |
750 | goto mismatch; | 952 | goto mismatch; |
751 | #endif | ||
752 | 953 | ||
753 | /* add new interrupt at end of irq queue */ | 954 | /* add new interrupt at end of irq queue */ |
754 | do { | 955 | do { |
956 | thread_mask |= old->thread_mask; | ||
755 | old_ptr = &old->next; | 957 | old_ptr = &old->next; |
756 | old = *old_ptr; | 958 | old = *old_ptr; |
757 | } while (old); | 959 | } while (old); |
758 | shared = 1; | 960 | shared = 1; |
759 | } | 961 | } |
760 | 962 | ||
963 | /* | ||
964 | * Setup the thread mask for this irqaction. Unlikely to have | ||
965 | * 32 or 64 irqs sharing one line, but who knows. | ||
966 | */ | ||
967 | if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { | ||
968 | ret = -EBUSY; | ||
969 | goto out_mask; | ||
970 | } | ||
971 | new->thread_mask = 1 << ffz(thread_mask); | ||
972 | |||
761 | if (!shared) { | 973 | if (!shared) { |
762 | irq_chip_set_defaults(desc->irq_data.chip); | 974 | irq_chip_set_defaults(desc->irq_data.chip); |
763 | 975 | ||
@@ -769,42 +981,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
769 | new->flags & IRQF_TRIGGER_MASK); | 981 | new->flags & IRQF_TRIGGER_MASK); |
770 | 982 | ||
771 | if (ret) | 983 | if (ret) |
772 | goto out_thread; | 984 | goto out_mask; |
773 | } else | 985 | } |
774 | compat_irq_chip_set_default_handler(desc); | ||
775 | #if defined(CONFIG_IRQ_PER_CPU) | ||
776 | if (new->flags & IRQF_PERCPU) | ||
777 | desc->status |= IRQ_PER_CPU; | ||
778 | #endif | ||
779 | 986 | ||
780 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | | 987 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
781 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); | 988 | IRQS_INPROGRESS | IRQS_ONESHOT | \ |
989 | IRQS_WAITING); | ||
990 | |||
991 | if (new->flags & IRQF_PERCPU) { | ||
992 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | ||
993 | irq_settings_set_per_cpu(desc); | ||
994 | } | ||
782 | 995 | ||
783 | if (new->flags & IRQF_ONESHOT) | 996 | if (new->flags & IRQF_ONESHOT) |
784 | desc->status |= IRQ_ONESHOT; | 997 | desc->istate |= IRQS_ONESHOT; |
785 | 998 | ||
786 | if (!(desc->status & IRQ_NOAUTOEN)) { | 999 | if (irq_settings_can_autoenable(desc)) |
787 | desc->depth = 0; | 1000 | irq_startup(desc); |
788 | desc->status &= ~IRQ_DISABLED; | 1001 | else |
789 | desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
790 | } else | ||
791 | /* Undo nested disables: */ | 1002 | /* Undo nested disables: */ |
792 | desc->depth = 1; | 1003 | desc->depth = 1; |
793 | 1004 | ||
794 | /* Exclude IRQ from balancing if requested */ | 1005 | /* Exclude IRQ from balancing if requested */ |
795 | if (new->flags & IRQF_NOBALANCING) | 1006 | if (new->flags & IRQF_NOBALANCING) { |
796 | desc->status |= IRQ_NO_BALANCING; | 1007 | irq_settings_set_no_balancing(desc); |
1008 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | ||
1009 | } | ||
797 | 1010 | ||
798 | /* Set default affinity mask once everything is setup */ | 1011 | /* Set default affinity mask once everything is setup */ |
799 | setup_affinity(irq, desc); | 1012 | setup_affinity(irq, desc, mask); |
800 | 1013 | ||
801 | } else if ((new->flags & IRQF_TRIGGER_MASK) | 1014 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
802 | && (new->flags & IRQF_TRIGGER_MASK) | 1015 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
803 | != (desc->status & IRQ_TYPE_SENSE_MASK)) { | 1016 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
804 | /* hope the handler works with the actual trigger mode... */ | 1017 | |
805 | pr_warning("IRQ %d uses trigger mode %d; requested %d\n", | 1018 | if (nmsk != omsk) |
806 | irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), | 1019 | /* hope the handler works with current trigger mode */ |
807 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 1020 | pr_warning("IRQ %d uses trigger mode %u; requested %u\n", |
1021 | irq, nmsk, omsk); | ||
808 | } | 1022 | } |
809 | 1023 | ||
810 | new->irq = irq; | 1024 | new->irq = irq; |
@@ -818,8 +1032,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
818 | * Check whether we disabled the irq via the spurious handler | 1032 | * Check whether we disabled the irq via the spurious handler |
819 | * before. Reenable it and give it another chance. | 1033 | * before. Reenable it and give it another chance. |
820 | */ | 1034 | */ |
821 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { | 1035 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
822 | desc->status &= ~IRQ_SPURIOUS_DISABLED; | 1036 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
823 | __enable_irq(desc, irq, false); | 1037 | __enable_irq(desc, irq, false); |
824 | } | 1038 | } |
825 | 1039 | ||
@@ -849,6 +1063,9 @@ mismatch: | |||
849 | #endif | 1063 | #endif |
850 | ret = -EBUSY; | 1064 | ret = -EBUSY; |
851 | 1065 | ||
1066 | out_mask: | ||
1067 | free_cpumask_var(mask); | ||
1068 | |||
852 | out_thread: | 1069 | out_thread: |
853 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1070 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
854 | if (new->thread) { | 1071 | if (new->thread) { |
@@ -871,9 +1088,14 @@ out_thread: | |||
871 | */ | 1088 | */ |
872 | int setup_irq(unsigned int irq, struct irqaction *act) | 1089 | int setup_irq(unsigned int irq, struct irqaction *act) |
873 | { | 1090 | { |
1091 | int retval; | ||
874 | struct irq_desc *desc = irq_to_desc(irq); | 1092 | struct irq_desc *desc = irq_to_desc(irq); |
875 | 1093 | ||
876 | return __setup_irq(irq, desc, act); | 1094 | chip_bus_lock(desc); |
1095 | retval = __setup_irq(irq, desc, act); | ||
1096 | chip_bus_sync_unlock(desc); | ||
1097 | |||
1098 | return retval; | ||
877 | } | 1099 | } |
878 | EXPORT_SYMBOL_GPL(setup_irq); | 1100 | EXPORT_SYMBOL_GPL(setup_irq); |
879 | 1101 | ||
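With the per-action thread_mask bookkeeping above, oneshot threaded handlers may now share a level-triggered line, provided every sharer passes IRQF_ONESHOT (the mismatch check enforces this). A driver-side sketch, names invented:

#include <linux/interrupt.h>

static irqreturn_t example_quick_check(int irq, void *dev_id)
{
    /* look at the device, claim the interrupt if it is ours */
    return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
    /* slow, sleeping work; line stays masked until all sharers finish */
    return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev)
{
    return request_threaded_irq(irq, example_quick_check, example_thread_fn,
                                IRQF_SHARED | IRQF_ONESHOT,
                                "example-shared-oneshot", dev);
}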
@@ -924,13 +1146,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
924 | #endif | 1146 | #endif |
925 | 1147 | ||
926 | /* If this was the last handler, shut down the IRQ line: */ | 1148 | /* If this was the last handler, shut down the IRQ line: */ |
927 | if (!desc->action) { | 1149 | if (!desc->action) |
928 | desc->status |= IRQ_DISABLED; | 1150 | irq_shutdown(desc); |
929 | if (desc->irq_data.chip->irq_shutdown) | ||
930 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | ||
931 | else | ||
932 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
933 | } | ||
934 | 1151 | ||
935 | #ifdef CONFIG_SMP | 1152 | #ifdef CONFIG_SMP |
936 | /* make sure affinity_hint is cleaned up */ | 1153 | /* make sure affinity_hint is cleaned up */ |
@@ -1004,6 +1221,11 @@ void free_irq(unsigned int irq, void *dev_id) | |||
1004 | if (!desc) | 1221 | if (!desc) |
1005 | return; | 1222 | return; |
1006 | 1223 | ||
1224 | #ifdef CONFIG_SMP | ||
1225 | if (WARN_ON(desc->affinity_notify)) | ||
1226 | desc->affinity_notify = NULL; | ||
1227 | #endif | ||
1228 | |||
1007 | chip_bus_lock(desc); | 1229 | chip_bus_lock(desc); |
1008 | kfree(__free_irq(irq, dev_id)); | 1230 | kfree(__free_irq(irq, dev_id)); |
1009 | chip_bus_sync_unlock(desc); | 1231 | chip_bus_sync_unlock(desc); |
@@ -1074,7 +1296,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1074 | if (!desc) | 1296 | if (!desc) |
1075 | return -EINVAL; | 1297 | return -EINVAL; |
1076 | 1298 | ||
1077 | if (desc->status & IRQ_NOREQUEST) | 1299 | if (!irq_settings_can_request(desc)) |
1078 | return -EINVAL; | 1300 | return -EINVAL; |
1079 | 1301 | ||
1080 | if (!handler) { | 1302 | if (!handler) { |
@@ -1100,7 +1322,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1100 | if (retval) | 1322 | if (retval) |
1101 | kfree(action); | 1323 | kfree(action); |
1102 | 1324 | ||
1103 | #ifdef CONFIG_DEBUG_SHIRQ | 1325 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1104 | if (!retval && (irqflags & IRQF_SHARED)) { | 1326 | if (!retval && (irqflags & IRQF_SHARED)) { |
1105 | /* | 1327 | /* |
1106 | * It's a shared IRQ -- the driver ought to be prepared for it | 1328 | * It's a shared IRQ -- the driver ought to be prepared for it |
@@ -1149,7 +1371,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |||
1149 | if (!desc) | 1371 | if (!desc) |
1150 | return -EINVAL; | 1372 | return -EINVAL; |
1151 | 1373 | ||
1152 | if (desc->status & IRQ_NESTED_THREAD) { | 1374 | if (irq_settings_is_nested_thread(desc)) { |
1153 | ret = request_threaded_irq(irq, NULL, handler, | 1375 | ret = request_threaded_irq(irq, NULL, handler, |
1154 | flags, name, dev_id); | 1376 | flags, name, dev_id); |
1155 | return !ret ? IRQC_IS_NESTED : ret; | 1377 | return !ret ? IRQC_IS_NESTED : ret; |
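request_any_context_irq(), shown in the last hunk, picks the right variant for lines that may be nested-threaded, e.g. behind an irq chip sitting on a slow bus. Sketch with invented names:

#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
    return IRQ_HANDLED;
}

static int example_request_any(unsigned int irq, void *dev)
{
    int ret;

    /* runs the handler in hardirq or nested-thread context as required */
    ret = request_any_context_irq(irq, example_handler, 0, "example", dev);
    if (ret < 0)
        return ret;

    /* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success */
    return 0;
}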
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 441fd629ff04..ec4806d4778b 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -4,23 +4,23 @@ | |||
4 | 4 | ||
5 | #include "internals.h" | 5 | #include "internals.h" |
6 | 6 | ||
7 | void move_masked_irq(int irq) | 7 | void irq_move_masked_irq(struct irq_data *idata) |
8 | { | 8 | { |
9 | struct irq_desc *desc = irq_to_desc(irq); | 9 | struct irq_desc *desc = irq_data_to_desc(idata); |
10 | struct irq_chip *chip = desc->irq_data.chip; | 10 | struct irq_chip *chip = idata->chip; |
11 | 11 | ||
12 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 12 | if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) |
13 | return; | 13 | return; |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. | 16 | * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. |
17 | */ | 17 | */ |
18 | if (CHECK_IRQ_PER_CPU(desc->status)) { | 18 | if (!irqd_can_balance(&desc->irq_data)) { |
19 | WARN_ON(1); | 19 | WARN_ON(1); |
20 | return; | 20 | return; |
21 | } | 21 | } |
22 | 22 | ||
23 | desc->status &= ~IRQ_MOVE_PENDING; | 23 | irqd_clr_move_pending(&desc->irq_data); |
24 | 24 | ||
25 | if (unlikely(cpumask_empty(desc->pending_mask))) | 25 | if (unlikely(cpumask_empty(desc->pending_mask))) |
26 | return; | 26 | return; |
@@ -53,15 +53,20 @@ void move_masked_irq(int irq) | |||
53 | cpumask_clear(desc->pending_mask); | 53 | cpumask_clear(desc->pending_mask); |
54 | } | 54 | } |
55 | 55 | ||
56 | void move_native_irq(int irq) | 56 | void move_masked_irq(int irq) |
57 | { | ||
58 | irq_move_masked_irq(irq_get_irq_data(irq)); | ||
59 | } | ||
60 | |||
61 | void irq_move_irq(struct irq_data *idata) | ||
57 | { | 62 | { |
58 | struct irq_desc *desc = irq_to_desc(irq); | 63 | struct irq_desc *desc = irq_data_to_desc(idata); |
59 | bool masked; | 64 | bool masked; |
60 | 65 | ||
61 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 66 | if (likely(!irqd_is_setaffinity_pending(idata))) |
62 | return; | 67 | return; |
63 | 68 | ||
64 | if (unlikely(desc->status & IRQ_DISABLED)) | 69 | if (unlikely(desc->istate & IRQS_DISABLED)) |
65 | return; | 70 | return; |
66 | 71 | ||
67 | /* | 72 | /* |
@@ -69,10 +74,15 @@ void move_native_irq(int irq) | |||
69 | * threaded interrupt with ONESHOT set, we can end up with an | 74 | * threaded interrupt with ONESHOT set, we can end up with an |
70 | * interrupt storm. | 75 | * interrupt storm. |
71 | */ | 76 | */ |
72 | masked = desc->status & IRQ_MASKED; | 77 | masked = desc->istate & IRQS_MASKED; |
73 | if (!masked) | 78 | if (!masked) |
74 | desc->irq_data.chip->irq_mask(&desc->irq_data); | 79 | idata->chip->irq_mask(idata); |
75 | move_masked_irq(irq); | 80 | irq_move_masked_irq(idata); |
76 | if (!masked) | 81 | if (!masked) |
77 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 82 | idata->chip->irq_unmask(idata); |
83 | } | ||
84 | |||
85 | void move_native_irq(int irq) | ||
86 | { | ||
87 | irq_move_irq(irq_get_irq_data(irq)); | ||
78 | } | 88 | } |
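The irq_data based helpers are intended for chip and flow code that already holds the irq_data; the int-irq wrappers above only remain for not-yet-converted callers. A rough, invented ack callback driving the deferred migration:

#include <linux/irq.h>

/* Hypothetical ack callback; the hardware acknowledge is omitted. */
static void example_chip_ack(struct irq_data *data)
{
    /* acknowledge the interrupt in the controller here */

    /*
     * If a set_affinity request was deferred while the interrupt was in
     * flight, perform the pending move now; irq_move_irq() masks and
     * unmasks the line around the move when necessary.
     */
    irq_move_irq(data);
}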
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 0d4005d85b03..f76fc00c9877 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * During system-wide suspend or hibernation device drivers need to be prevented | 18 | * During system-wide suspend or hibernation device drivers need to be prevented |
19 | * from receiving interrupts and this function is provided for this purpose. | 19 | * from receiving interrupts and this function is provided for this purpose. |
20 | * It marks all interrupt lines in use, except for the timer ones, as disabled | 20 | * It marks all interrupt lines in use, except for the timer ones, as disabled |
21 | * and sets the IRQ_SUSPENDED flag for each of them. | 21 | * and sets the IRQS_SUSPENDED flag for each of them. |
22 | */ | 22 | */ |
23 | void suspend_device_irqs(void) | 23 | void suspend_device_irqs(void) |
24 | { | 24 | { |
@@ -34,7 +34,7 @@ void suspend_device_irqs(void) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | for_each_irq_desc(irq, desc) | 36 | for_each_irq_desc(irq, desc) |
37 | if (desc->status & IRQ_SUSPENDED) | 37 | if (desc->istate & IRQS_SUSPENDED) |
38 | synchronize_irq(irq); | 38 | synchronize_irq(irq); |
39 | } | 39 | } |
40 | EXPORT_SYMBOL_GPL(suspend_device_irqs); | 40 | EXPORT_SYMBOL_GPL(suspend_device_irqs); |
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs); | |||
43 | * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() | 43 | * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() |
44 | * | 44 | * |
45 | * Enable all interrupt lines previously disabled by suspend_device_irqs() that | 45 | * Enable all interrupt lines previously disabled by suspend_device_irqs() that |
46 | * have the IRQ_SUSPENDED flag set. | 46 | * have the IRQS_SUSPENDED flag set. |
47 | */ | 47 | */ |
48 | void resume_device_irqs(void) | 48 | void resume_device_irqs(void) |
49 | { | 49 | { |
@@ -53,9 +53,6 @@ void resume_device_irqs(void) | |||
53 | for_each_irq_desc(irq, desc) { | 53 | for_each_irq_desc(irq, desc) { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | 55 | ||
56 | if (!(desc->status & IRQ_SUSPENDED)) | ||
57 | continue; | ||
58 | |||
59 | raw_spin_lock_irqsave(&desc->lock, flags); | 56 | raw_spin_lock_irqsave(&desc->lock, flags); |
60 | __enable_irq(desc, irq, true); | 57 | __enable_irq(desc, irq, true); |
61 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 58 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
@@ -71,9 +68,24 @@ int check_wakeup_irqs(void) | |||
71 | struct irq_desc *desc; | 68 | struct irq_desc *desc; |
72 | int irq; | 69 | int irq; |
73 | 70 | ||
74 | for_each_irq_desc(irq, desc) | 71 | for_each_irq_desc(irq, desc) { |
75 | if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING)) | 72 | if (irqd_is_wakeup_set(&desc->irq_data)) { |
76 | return -EBUSY; | 73 | if (desc->istate & IRQS_PENDING) |
74 | return -EBUSY; | ||
75 | continue; | ||
76 | } | ||
77 | /* | ||
78 | * Check whether the non-wakeup interrupts need | ||
79 | * to be masked before finally going into suspend | ||
80 | * state. That's for hardware which has no wakeup | ||
81 | * source configuration facility. The chip | ||
82 | * implementation indicates that with | ||
83 | * IRQCHIP_MASK_ON_SUSPEND. | ||
84 | */ | ||
85 | if (desc->istate & IRQS_SUSPENDED && | ||
86 | irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) | ||
87 | mask_irq(desc); | ||
88 | } | ||
77 | 89 | ||
78 | return 0; | 90 | return 0; |
79 | } | 91 | } |
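
check_wakeup_irqs() now does two jobs in one pass: a wakeup-armed line with a pending interrupt aborts the suspend, and suspended non-wakeup lines are masked when the chip advertises IRQCHIP_MASK_ON_SUSPEND, i.e. when the hardware cannot keep such lines from waking the machine on its own. A minimal userspace sketch of that decision structure (toy_* names are made up; this is not the kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    #define TOY_EBUSY 16

    /* Toy per-line state loosely mirroring the fields the loop inspects. */
    struct toy_desc {
        bool wakeup_set;            /* irqd_is_wakeup_set()     */
        bool pending;               /* IRQS_PENDING             */
        bool suspended;             /* IRQS_SUSPENDED           */
        bool chip_mask_on_suspend;  /* IRQCHIP_MASK_ON_SUSPEND  */
        bool masked;
    };

    /* Mirrors the decision structure: a pending wakeup line aborts suspend;
     * other suspended lines get masked if the chip cannot keep them from
     * waking the system by itself. */
    static int toy_check_wakeup(struct toy_desc *descs, int n)
    {
        for (int i = 0; i < n; i++) {
            struct toy_desc *d = &descs[i];

            if (d->wakeup_set) {
                if (d->pending)
                    return -TOY_EBUSY;
                continue;
            }
            if (d->suspended && d->chip_mask_on_suspend)
                d->masked = true;
        }
        return 0;
    }

    int main(void)
    {
        struct toy_desc lines[2] = {
            { .wakeup_set = true,  .pending = false },
            { .suspended = true,   .chip_mask_on_suspend = true },
        };
        int ret = toy_check_wakeup(lines, 2);

        printf("ret=%d, non-wakeup line masked=%d\n", ret, lines[1].masked);
        return 0;
    }
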
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 6c8a2a9f8a7b..4cc2e5ed0bec 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> | ||
14 | 15 | ||
15 | #include "internals.h" | 16 | #include "internals.h" |
16 | 17 | ||
@@ -24,7 +25,7 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v) | |||
24 | const struct cpumask *mask = desc->irq_data.affinity; | 25 | const struct cpumask *mask = desc->irq_data.affinity; |
25 | 26 | ||
26 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 27 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
27 | if (desc->status & IRQ_MOVE_PENDING) | 28 | if (irqd_is_setaffinity_pending(&desc->irq_data)) |
28 | mask = desc->pending_mask; | 29 | mask = desc->pending_mask; |
29 | #endif | 30 | #endif |
30 | seq_cpumask(m, mask); | 31 | seq_cpumask(m, mask); |
@@ -65,8 +66,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
65 | cpumask_var_t new_value; | 66 | cpumask_var_t new_value; |
66 | int err; | 67 | int err; |
67 | 68 | ||
68 | if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || | 69 | if (!irq_can_set_affinity(irq) || no_irq_affinity) |
69 | irq_balancing_disabled(irq)) | ||
70 | return -EIO; | 70 | return -EIO; |
71 | 71 | ||
72 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) | 72 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) |
@@ -89,7 +89,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
89 | if (!cpumask_intersects(new_value, cpu_online_mask)) { | 89 | if (!cpumask_intersects(new_value, cpu_online_mask)) { |
90 | /* Special case for empty set - allow the architecture | 90 | /* Special case for empty set - allow the architecture |
91 | code to set default SMP affinity. */ | 91 | code to set default SMP affinity. */ |
92 | err = irq_select_affinity_usr(irq) ? -EINVAL : count; | 92 | err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count; |
93 | } else { | 93 | } else { |
94 | irq_set_affinity(irq, new_value); | 94 | irq_set_affinity(irq, new_value); |
95 | err = count; | 95 | err = count; |
@@ -357,3 +357,65 @@ void init_irq_proc(void) | |||
357 | } | 357 | } |
358 | } | 358 | } |
359 | 359 | ||
360 | #ifdef CONFIG_GENERIC_IRQ_SHOW | ||
361 | |||
362 | int __weak arch_show_interrupts(struct seq_file *p, int prec) | ||
363 | { | ||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | int show_interrupts(struct seq_file *p, void *v) | ||
368 | { | ||
369 | static int prec; | ||
370 | |||
371 | unsigned long flags, any_count = 0; | ||
372 | int i = *(loff_t *) v, j; | ||
373 | struct irqaction *action; | ||
374 | struct irq_desc *desc; | ||
375 | |||
376 | if (i > nr_irqs) | ||
377 | return 0; | ||
378 | |||
379 | if (i == nr_irqs) | ||
380 | return arch_show_interrupts(p, prec); | ||
381 | |||
382 | /* print header and calculate the width of the first column */ | ||
383 | if (i == 0) { | ||
384 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
385 | j *= 10; | ||
386 | |||
387 | seq_printf(p, "%*s", prec + 8, ""); | ||
388 | for_each_online_cpu(j) | ||
389 | seq_printf(p, "CPU%-8d", j); | ||
390 | seq_putc(p, '\n'); | ||
391 | } | ||
392 | |||
393 | desc = irq_to_desc(i); | ||
394 | if (!desc) | ||
395 | return 0; | ||
396 | |||
397 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
398 | for_each_online_cpu(j) | ||
399 | any_count |= kstat_irqs_cpu(i, j); | ||
400 | action = desc->action; | ||
401 | if (!action && !any_count) | ||
402 | goto out; | ||
403 | |||
404 | seq_printf(p, "%*d: ", prec, i); | ||
405 | for_each_online_cpu(j) | ||
406 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
407 | seq_printf(p, " %8s", desc->irq_data.chip->name); | ||
408 | seq_printf(p, "-%-8s", desc->name); | ||
409 | |||
410 | if (action) { | ||
411 | seq_printf(p, " %s", action->name); | ||
412 | while ((action = action->next) != NULL) | ||
413 | seq_printf(p, ", %s", action->name); | ||
414 | } | ||
415 | |||
416 | seq_putc(p, '\n'); | ||
417 | out: | ||
418 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
419 | return 0; | ||
420 | } | ||
421 | #endif | ||
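
Two small mechanisms carry the new generic /proc/interrupts output: arch_show_interrupts() is a weak no-op default that an architecture overrides with a non-weak definition of its own, and the width of the first column is derived once from nr_irqs (at least three digits, widened until the largest interrupt number fits, capped at ten). The sketch below is a standalone userspace model of both ideas, assuming a GCC/Clang toolchain; the names are invented for the example and are not the kernel symbols:

    #include <stdio.h>

    /* Weak default hook: a non-weak definition of the same symbol in another
     * object file wins at link time, mirroring how an architecture overrides
     * arch_show_interrupts(). */
    __attribute__((weak)) int arch_show_model(FILE *out, int prec)
    {
        (void)out;
        (void)prec;
        return 0;
    }

    /* Width of the IRQ-number column: at least 3 digits, grown until the
     * highest interrupt number fits, capped at 10 - the same loop shape as
     * the header path of show_interrupts(). */
    static int irq_column_width(unsigned int nr_irqs)
    {
        int prec;
        unsigned long j;

        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
            j *= 10;
        return prec;
    }

    int main(void)
    {
        unsigned int samples[] = { 16, 256, 1024, 100000 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("nr_irqs=%-7u -> %d-character IRQ column\n",
                   samples[i], irq_column_width(samples[i]));

        return arch_show_model(stdout, irq_column_width(16));
    }
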
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 891115a929aa..ad683a99b1ec 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 23 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
24 | 24 | ||
25 | /* Bitmap to handle software resend of interrupts: */ | 25 | /* Bitmap to handle software resend of interrupts: */ |
26 | static DECLARE_BITMAP(irqs_resend, NR_IRQS); | 26 | static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Run software resends of IRQ's | 29 | * Run software resends of IRQ's |
@@ -55,20 +55,19 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); | |||
55 | */ | 55 | */ |
56 | void check_irq_resend(struct irq_desc *desc, unsigned int irq) | 56 | void check_irq_resend(struct irq_desc *desc, unsigned int irq) |
57 | { | 57 | { |
58 | unsigned int status = desc->status; | ||
59 | |||
60 | /* | ||
61 | * Make sure the interrupt is enabled, before resending it: | ||
62 | */ | ||
63 | desc->irq_data.chip->irq_enable(&desc->irq_data); | ||
64 | |||
65 | /* | 58 | /* |
66 | * We do not resend level type interrupts. Level type | 59 | * We do not resend level type interrupts. Level type |
67 | * interrupts are resent by hardware when they are still | 60 | * interrupts are resent by hardware when they are still |
68 | * active. | 61 | * active. |
69 | */ | 62 | */ |
70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { | 63 | if (irq_settings_is_level(desc)) |
71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; | 64 | return; |
65 | if (desc->istate & IRQS_REPLAY) | ||
66 | return; | ||
67 | if (desc->istate & IRQS_PENDING) { | ||
68 | irq_compat_clr_pending(desc); | ||
69 | desc->istate &= ~IRQS_PENDING; | ||
70 | desc->istate |= IRQS_REPLAY; | ||
72 | 71 | ||
73 | if (!desc->irq_data.chip->irq_retrigger || | 72 | if (!desc->irq_data.chip->irq_retrigger || |
74 | !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { | 73 | !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { |
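
The rewritten check_irq_resend() spells its policy out as early returns: level-triggered lines are never resent (the hardware re-asserts them while they are active), a line already marked IRQS_REPLAY is left alone, and only a pending edge interrupt is turned into a replay, preferably via the chip's irq_retrigger() and otherwise via the software resend bitmap and tasklet. A rough standalone model of that chain (invented toy_* names, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the check_irq_resend() decision chain. */
    struct toy_desc {
        bool level;             /* irq_settings_is_level()                  */
        bool replay;            /* IRQS_REPLAY                              */
        bool pending;           /* IRQS_PENDING                             */
        bool can_retrigger;     /* chip has irq_retrigger() and it succeeds */
        bool sw_resend_queued;  /* would set the bit in irqs_resend         */
    };

    static void toy_check_resend(struct toy_desc *d)
    {
        if (d->level)       /* level interrupts re-assert themselves in hw */
            return;
        if (d->replay)      /* a resend is already in flight */
            return;
        if (!d->pending)
            return;

        d->pending = false;
        d->replay = true;
        if (!d->can_retrigger)
            d->sw_resend_queued = true;  /* fall back to the resend tasklet */
    }

    int main(void)
    {
        struct toy_desc d = { .pending = true, .can_retrigger = false };

        toy_check_resend(&d);
        printf("replay=%d sw_resend_queued=%d\n", d.replay, d.sw_resend_queued);
        return 0;
    }
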
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h new file mode 100644 index 000000000000..0227ad358272 --- /dev/null +++ b/kernel/irq/settings.h | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * Internal header to deal with irq_desc->status which will be renamed | ||
3 | * to irq_desc->settings. | ||
4 | */ | ||
5 | enum { | ||
6 | _IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS, | ||
7 | _IRQ_PER_CPU = IRQ_PER_CPU, | ||
8 | _IRQ_LEVEL = IRQ_LEVEL, | ||
9 | _IRQ_NOPROBE = IRQ_NOPROBE, | ||
10 | _IRQ_NOREQUEST = IRQ_NOREQUEST, | ||
11 | _IRQ_NOAUTOEN = IRQ_NOAUTOEN, | ||
12 | _IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT, | ||
13 | _IRQ_NO_BALANCING = IRQ_NO_BALANCING, | ||
14 | _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, | ||
15 | _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, | ||
16 | }; | ||
17 | |||
18 | #define IRQ_INPROGRESS GOT_YOU_MORON | ||
19 | #define IRQ_REPLAY GOT_YOU_MORON | ||
20 | #define IRQ_WAITING GOT_YOU_MORON | ||
21 | #define IRQ_DISABLED GOT_YOU_MORON | ||
22 | #define IRQ_PENDING GOT_YOU_MORON | ||
23 | #define IRQ_MASKED GOT_YOU_MORON | ||
24 | #define IRQ_WAKEUP GOT_YOU_MORON | ||
25 | #define IRQ_MOVE_PENDING GOT_YOU_MORON | ||
26 | #define IRQ_PER_CPU GOT_YOU_MORON | ||
27 | #define IRQ_NO_BALANCING GOT_YOU_MORON | ||
28 | #define IRQ_AFFINITY_SET GOT_YOU_MORON | ||
29 | #define IRQ_LEVEL GOT_YOU_MORON | ||
30 | #define IRQ_NOPROBE GOT_YOU_MORON | ||
31 | #define IRQ_NOREQUEST GOT_YOU_MORON | ||
32 | #define IRQ_NOAUTOEN GOT_YOU_MORON | ||
33 | #define IRQ_NESTED_THREAD GOT_YOU_MORON | ||
34 | #undef IRQF_MODIFY_MASK | ||
35 | #define IRQF_MODIFY_MASK GOT_YOU_MORON | ||
36 | |||
37 | static inline void | ||
38 | irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) | ||
39 | { | ||
40 | desc->status &= ~(clr & _IRQF_MODIFY_MASK); | ||
41 | desc->status |= (set & _IRQF_MODIFY_MASK); | ||
42 | } | ||
43 | |||
44 | static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) | ||
45 | { | ||
46 | return desc->status & _IRQ_PER_CPU; | ||
47 | } | ||
48 | |||
49 | static inline void irq_settings_set_per_cpu(struct irq_desc *desc) | ||
50 | { | ||
51 | desc->status |= _IRQ_PER_CPU; | ||
52 | } | ||
53 | |||
54 | static inline void irq_settings_set_no_balancing(struct irq_desc *desc) | ||
55 | { | ||
56 | desc->status |= _IRQ_NO_BALANCING; | ||
57 | } | ||
58 | |||
59 | static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) | ||
60 | { | ||
61 | return desc->status & _IRQ_NO_BALANCING; | ||
62 | } | ||
63 | |||
64 | static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) | ||
65 | { | ||
66 | return desc->status & IRQ_TYPE_SENSE_MASK; | ||
67 | } | ||
68 | |||
69 | static inline void | ||
70 | irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) | ||
71 | { | ||
72 | desc->status &= ~IRQ_TYPE_SENSE_MASK; | ||
73 | desc->status |= mask & IRQ_TYPE_SENSE_MASK; | ||
74 | } | ||
75 | |||
76 | static inline bool irq_settings_is_level(struct irq_desc *desc) | ||
77 | { | ||
78 | return desc->status & _IRQ_LEVEL; | ||
79 | } | ||
80 | |||
81 | static inline void irq_settings_clr_level(struct irq_desc *desc) | ||
82 | { | ||
83 | desc->status &= ~_IRQ_LEVEL; | ||
84 | } | ||
85 | |||
86 | static inline void irq_settings_set_level(struct irq_desc *desc) | ||
87 | { | ||
88 | desc->status |= _IRQ_LEVEL; | ||
89 | } | ||
90 | |||
91 | static inline bool irq_settings_can_request(struct irq_desc *desc) | ||
92 | { | ||
93 | return !(desc->status & _IRQ_NOREQUEST); | ||
94 | } | ||
95 | |||
96 | static inline void irq_settings_clr_norequest(struct irq_desc *desc) | ||
97 | { | ||
98 | desc->status &= ~_IRQ_NOREQUEST; | ||
99 | } | ||
100 | |||
101 | static inline void irq_settings_set_norequest(struct irq_desc *desc) | ||
102 | { | ||
103 | desc->status |= _IRQ_NOREQUEST; | ||
104 | } | ||
105 | |||
106 | static inline bool irq_settings_can_probe(struct irq_desc *desc) | ||
107 | { | ||
108 | return !(desc->status & _IRQ_NOPROBE); | ||
109 | } | ||
110 | |||
111 | static inline void irq_settings_clr_noprobe(struct irq_desc *desc) | ||
112 | { | ||
113 | desc->status &= ~_IRQ_NOPROBE; | ||
114 | } | ||
115 | |||
116 | static inline void irq_settings_set_noprobe(struct irq_desc *desc) | ||
117 | { | ||
118 | desc->status |= _IRQ_NOPROBE; | ||
119 | } | ||
120 | |||
121 | static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) | ||
122 | { | ||
123 | return desc->status & _IRQ_MOVE_PCNTXT; | ||
124 | } | ||
125 | |||
126 | static inline bool irq_settings_can_autoenable(struct irq_desc *desc) | ||
127 | { | ||
128 | return !(desc->status & _IRQ_NOAUTOEN); | ||
129 | } | ||
130 | |||
131 | static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) | ||
132 | { | ||
133 | return desc->status & _IRQ_NESTED_THREAD; | ||
134 | } | ||
135 | |||
136 | /* Nothing should touch desc->status from now on */ | ||
137 | #undef status | ||
138 | #define status USE_THE_PROPER_WRAPPERS_YOU_MORON | ||
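
The new settings.h header has two parts: thin accessors that are now the only sanctioned way to touch the bits kept in desc->status, and poisoning defines that turn the old flag names (and finally the status field itself) into identifiers that cannot compile, so any leftover direct user is caught at build time. The fragment below reproduces that pattern in a self-contained userspace file with made-up TOY_* names; it illustrates the trick, it is not the kernel header:

    #include <stdbool.h>
    #include <stdio.h>

    /* Raw bits, only ever touched through the accessors below. */
    #define TOY_LEVEL   0x01
    #define TOY_PER_CPU 0x02

    struct toy_desc { unsigned int status; };

    static inline bool toy_is_level(struct toy_desc *d)   { return d->status & TOY_LEVEL; }
    static inline void toy_set_level(struct toy_desc *d)  { d->status |= TOY_LEVEL; }
    static inline bool toy_is_per_cpu(struct toy_desc *d) { return d->status & TOY_PER_CPU; }

    /* From here on, any code that still spells the raw flag name breaks the
     * build with an unmistakable identifier - the same trick the
     * GOT_YOU_MORON and USE_THE_PROPER_WRAPPERS_YOU_MORON defines play. */
    #undef TOY_LEVEL
    #define TOY_LEVEL USE_THE_ACCESSORS

    int main(void)
    {
        struct toy_desc d = { 0 };

        toy_set_level(&d);
        printf("level=%d per_cpu=%d\n", toy_is_level(&d), toy_is_per_cpu(&d));
        return 0;
    }

Once every access goes through a helper, renaming the underlying field (here, status to settings) becomes a purely local change, which is the point of introducing the wrappers first.
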
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 3089d3b9d5f3..dd586ebf9c8c 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -21,70 +21,94 @@ static int irqfixup __read_mostly; | |||
21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) | 21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) |
22 | static void poll_spurious_irqs(unsigned long dummy); | 22 | static void poll_spurious_irqs(unsigned long dummy); |
23 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); | 23 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); |
24 | static int irq_poll_cpu; | ||
25 | static atomic_t irq_poll_active; | ||
26 | |||
27 | /* | ||
28 | * We wait here for a poller to finish. | ||
29 | * | ||
30 | * If the poll runs on this CPU, then we yell loudly and return | ||
31 | * false. That will leave the interrupt line disabled in the worst | ||
32 | * case, but it should never happen. | ||
33 | * | ||
34 | * We wait until the poller is done and then recheck disabled and | ||
35 | * action (about to be disabled). Only if it's still active, we return | ||
36 | * true and let the handler run. | ||
37 | */ | ||
38 | bool irq_wait_for_poll(struct irq_desc *desc) | ||
39 | { | ||
40 | if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), | ||
41 | "irq poll in progress on cpu %d for irq %d\n", | ||
42 | smp_processor_id(), desc->irq_data.irq)) | ||
43 | return false; | ||
44 | |||
45 | #ifdef CONFIG_SMP | ||
46 | do { | ||
47 | raw_spin_unlock(&desc->lock); | ||
48 | while (desc->istate & IRQS_INPROGRESS) | ||
49 | cpu_relax(); | ||
50 | raw_spin_lock(&desc->lock); | ||
51 | } while (desc->istate & IRQS_INPROGRESS); | ||
52 | /* Might have been disabled in meantime */ | ||
53 | return !(desc->istate & IRQS_DISABLED) && desc->action; | ||
54 | #else | ||
55 | return false; | ||
56 | #endif | ||
57 | } | ||
58 | |||
24 | 59 | ||
25 | /* | 60 | /* |
26 | * Recovery handler for misrouted interrupts. | 61 | * Recovery handler for misrouted interrupts. |
27 | */ | 62 | */ |
28 | static int try_one_irq(int irq, struct irq_desc *desc) | 63 | static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
29 | { | 64 | { |
65 | irqreturn_t ret = IRQ_NONE; | ||
30 | struct irqaction *action; | 66 | struct irqaction *action; |
31 | int ok = 0, work = 0; | ||
32 | 67 | ||
33 | raw_spin_lock(&desc->lock); | 68 | raw_spin_lock(&desc->lock); |
34 | /* Already running on another processor */ | ||
35 | if (desc->status & IRQ_INPROGRESS) { | ||
36 | /* | ||
37 | * Already running: If it is shared get the other | ||
38 | * CPU to go looking for our mystery interrupt too | ||
39 | */ | ||
40 | if (desc->action && (desc->action->flags & IRQF_SHARED)) | ||
41 | desc->status |= IRQ_PENDING; | ||
42 | raw_spin_unlock(&desc->lock); | ||
43 | return ok; | ||
44 | } | ||
45 | /* Honour the normal IRQ locking */ | ||
46 | desc->status |= IRQ_INPROGRESS; | ||
47 | action = desc->action; | ||
48 | raw_spin_unlock(&desc->lock); | ||
49 | 69 | ||
50 | while (action) { | 70 | /* PER_CPU and nested thread interrupts are never polled */ |
51 | /* Only shared IRQ handlers are safe to call */ | 71 | if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) |
52 | if (action->flags & IRQF_SHARED) { | 72 | goto out; |
53 | if (action->handler(irq, action->dev_id) == | ||
54 | IRQ_HANDLED) | ||
55 | ok = 1; | ||
56 | } | ||
57 | action = action->next; | ||
58 | } | ||
59 | local_irq_disable(); | ||
60 | /* Now clean up the flags */ | ||
61 | raw_spin_lock(&desc->lock); | ||
62 | action = desc->action; | ||
63 | 73 | ||
64 | /* | 74 | /* |
65 | * While we were looking for a fixup someone queued a real | 75 | * Do not poll disabled interrupts unless the spurious |
66 | * IRQ clashing with our walk: | 76 | * disabled poller asks explicitly. |
67 | */ | 77 | */ |
68 | while ((desc->status & IRQ_PENDING) && action) { | 78 | if ((desc->istate & IRQS_DISABLED) && !force) |
79 | goto out; | ||
80 | |||
81 | /* | ||
82 | * All handlers must agree on IRQF_SHARED, so we test just the | ||
83 | * first. Check for action->next as well. | ||
84 | */ | ||
85 | action = desc->action; | ||
86 | if (!action || !(action->flags & IRQF_SHARED) || | ||
87 | (action->flags & __IRQF_TIMER) || !action->next) | ||
88 | goto out; | ||
89 | |||
90 | /* Already running on another processor */ | ||
91 | if (desc->istate & IRQS_INPROGRESS) { | ||
69 | /* | 92 | /* |
70 | * Perform real IRQ processing for the IRQ we deferred | 93 | * Already running: If it is shared get the other |
94 | * CPU to go looking for our mystery interrupt too | ||
71 | */ | 95 | */ |
72 | work = 1; | 96 | irq_compat_set_pending(desc); |
73 | raw_spin_unlock(&desc->lock); | 97 | desc->istate |= IRQS_PENDING; |
74 | handle_IRQ_event(irq, action); | 98 | goto out; |
75 | raw_spin_lock(&desc->lock); | ||
76 | desc->status &= ~IRQ_PENDING; | ||
77 | } | 99 | } |
78 | desc->status &= ~IRQ_INPROGRESS; | ||
79 | /* | ||
80 | * If we did actual work for the real IRQ line we must let the | ||
81 | * IRQ controller clean up too | ||
82 | */ | ||
83 | if (work) | ||
84 | irq_end(irq, desc); | ||
85 | raw_spin_unlock(&desc->lock); | ||
86 | 100 | ||
87 | return ok; | 101 | /* Mark it poll in progress */ |
102 | desc->istate |= IRQS_POLL_INPROGRESS; | ||
103 | do { | ||
104 | if (handle_irq_event(desc) == IRQ_HANDLED) | ||
105 | ret = IRQ_HANDLED; | ||
106 | action = desc->action; | ||
107 | } while ((desc->istate & IRQS_PENDING) && action); | ||
108 | desc->istate &= ~IRQS_POLL_INPROGRESS; | ||
109 | out: | ||
110 | raw_spin_unlock(&desc->lock); | ||
111 | return ret == IRQ_HANDLED; | ||
88 | } | 112 | } |
89 | 113 | ||
90 | static int misrouted_irq(int irq) | 114 | static int misrouted_irq(int irq) |
@@ -92,6 +116,11 @@ static int misrouted_irq(int irq) | |||
92 | struct irq_desc *desc; | 116 | struct irq_desc *desc; |
93 | int i, ok = 0; | 117 | int i, ok = 0; |
94 | 118 | ||
119 | if (atomic_inc_return(&irq_poll_active) != 1) | ||
120 | goto out; | ||
121 | |||
122 | irq_poll_cpu = smp_processor_id(); | ||
123 | |||
95 | for_each_irq_desc(i, desc) { | 124 | for_each_irq_desc(i, desc) { |
96 | if (!i) | 125 | if (!i) |
97 | continue; | 126 | continue; |
@@ -99,9 +128,11 @@ static int misrouted_irq(int irq) | |||
99 | if (i == irq) /* Already tried */ | 128 | if (i == irq) /* Already tried */ |
100 | continue; | 129 | continue; |
101 | 130 | ||
102 | if (try_one_irq(i, desc)) | 131 | if (try_one_irq(i, desc, false)) |
103 | ok = 1; | 132 | ok = 1; |
104 | } | 133 | } |
134 | out: | ||
135 | atomic_dec(&irq_poll_active); | ||
105 | /* So the caller can adjust the irq error counts */ | 136 | /* So the caller can adjust the irq error counts */ |
106 | return ok; | 137 | return ok; |
107 | } | 138 | } |
@@ -111,23 +142,28 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
111 | struct irq_desc *desc; | 142 | struct irq_desc *desc; |
112 | int i; | 143 | int i; |
113 | 144 | ||
145 | if (atomic_inc_return(&irq_poll_active) != 1) | ||
146 | goto out; | ||
147 | irq_poll_cpu = smp_processor_id(); | ||
148 | |||
114 | for_each_irq_desc(i, desc) { | 149 | for_each_irq_desc(i, desc) { |
115 | unsigned int status; | 150 | unsigned int state; |
116 | 151 | ||
117 | if (!i) | 152 | if (!i) |
118 | continue; | 153 | continue; |
119 | 154 | ||
120 | /* Racy but it doesn't matter */ | 155 | /* Racy but it doesn't matter */ |
121 | status = desc->status; | 156 | state = desc->istate; |
122 | barrier(); | 157 | barrier(); |
123 | if (!(status & IRQ_SPURIOUS_DISABLED)) | 158 | if (!(state & IRQS_SPURIOUS_DISABLED)) |
124 | continue; | 159 | continue; |
125 | 160 | ||
126 | local_irq_disable(); | 161 | local_irq_disable(); |
127 | try_one_irq(i, desc); | 162 | try_one_irq(i, desc, true); |
128 | local_irq_enable(); | 163 | local_irq_enable(); |
129 | } | 164 | } |
130 | 165 | out: | |
166 | atomic_dec(&irq_poll_active); | ||
131 | mod_timer(&poll_spurious_irq_timer, | 167 | mod_timer(&poll_spurious_irq_timer, |
132 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 168 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
133 | } | 169 | } |
@@ -139,15 +175,13 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
139 | * | 175 | * |
140 | * (The other 100-of-100,000 interrupts may have been a correctly | 176 | * (The other 100-of-100,000 interrupts may have been a correctly |
141 | * functioning device sharing an IRQ with the failing one) | 177 | * functioning device sharing an IRQ with the failing one) |
142 | * | ||
143 | * Called under desc->lock | ||
144 | */ | 178 | */ |
145 | |||
146 | static void | 179 | static void |
147 | __report_bad_irq(unsigned int irq, struct irq_desc *desc, | 180 | __report_bad_irq(unsigned int irq, struct irq_desc *desc, |
148 | irqreturn_t action_ret) | 181 | irqreturn_t action_ret) |
149 | { | 182 | { |
150 | struct irqaction *action; | 183 | struct irqaction *action; |
184 | unsigned long flags; | ||
151 | 185 | ||
152 | if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { | 186 | if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { |
153 | printk(KERN_ERR "irq event %d: bogus return value %x\n", | 187 | printk(KERN_ERR "irq event %d: bogus return value %x\n", |
@@ -159,6 +193,13 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc, | |||
159 | dump_stack(); | 193 | dump_stack(); |
160 | printk(KERN_ERR "handlers:\n"); | 194 | printk(KERN_ERR "handlers:\n"); |
161 | 195 | ||
196 | /* | ||
197 | * We need to take desc->lock here. note_interrupt() is called | ||
198 | * w/o desc->lock held, but IRQ_PROGRESS set. We might race | ||
199 | * with something else removing an action. It's ok to take | ||
200 | * desc->lock here. See synchronize_irq(). | ||
201 | */ | ||
202 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
162 | action = desc->action; | 203 | action = desc->action; |
163 | while (action) { | 204 | while (action) { |
164 | printk(KERN_ERR "[<%p>]", action->handler); | 205 | printk(KERN_ERR "[<%p>]", action->handler); |
@@ -167,6 +208,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc, | |||
167 | printk("\n"); | 208 | printk("\n"); |
168 | action = action->next; | 209 | action = action->next; |
169 | } | 210 | } |
211 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
170 | } | 212 | } |
171 | 213 | ||
172 | static void | 214 | static void |
@@ -218,6 +260,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, | |||
218 | void note_interrupt(unsigned int irq, struct irq_desc *desc, | 260 | void note_interrupt(unsigned int irq, struct irq_desc *desc, |
219 | irqreturn_t action_ret) | 261 | irqreturn_t action_ret) |
220 | { | 262 | { |
263 | if (desc->istate & IRQS_POLL_INPROGRESS) | ||
264 | return; | ||
265 | |||
221 | if (unlikely(action_ret != IRQ_HANDLED)) { | 266 | if (unlikely(action_ret != IRQ_HANDLED)) { |
222 | /* | 267 | /* |
223 | * If we are seeing only the odd spurious IRQ caused by | 268 | * If we are seeing only the odd spurious IRQ caused by |
@@ -254,9 +299,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
254 | * Now kill the IRQ | 299 | * Now kill the IRQ |
255 | */ | 300 | */ |
256 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); | 301 | printk(KERN_EMERG "Disabling IRQ #%d\n", irq); |
257 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; | 302 | desc->istate |= IRQS_SPURIOUS_DISABLED; |
258 | desc->depth++; | 303 | desc->depth++; |
259 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 304 | irq_disable(desc); |
260 | 305 | ||
261 | mod_timer(&poll_spurious_irq_timer, | 306 | mod_timer(&poll_spurious_irq_timer, |
262 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 307 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
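
Both pollers are now serialized through one global gate: atomic_inc_return(&irq_poll_active) admits exactly one poller, irq_poll_cpu records which CPU it runs on so irq_wait_for_poll() can refuse to spin-wait on itself, and everyone else backs off. A compact userspace model of the gate, written with C11 atomics (the names and the decrement-on-failure placement are choices of this sketch; the kernel code decrements at its common exit label instead):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int poll_active;
    static int poll_cpu = -1;

    /* Admit exactly one poller: the increment that takes the counter to 1
     * wins; every other caller undoes its increment and reports failure. */
    static bool try_enter_poll(int this_cpu)
    {
        if (atomic_fetch_add(&poll_active, 1) + 1 != 1) {
            atomic_fetch_sub(&poll_active, 1);
            return false;
        }
        poll_cpu = this_cpu;
        return true;
    }

    static void leave_poll(void)
    {
        atomic_fetch_sub(&poll_active, 1);
    }

    int main(void)
    {
        if (try_enter_poll(0)) {
            printf("cpu0 polls (poll_cpu=%d); concurrent attempt admitted? %s\n",
                   poll_cpu, try_enter_poll(1) ? "yes" : "no");
            leave_poll();
        }
        return 0;
    }
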
diff --git a/kernel/sched.c b/kernel/sched.c index 18d38e4ec7ba..66ca5d9ba83c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2224,7 +2224,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) | |||
2224 | * yield - it could be a while. | 2224 | * yield - it could be a while. |
2225 | */ | 2225 | */ |
2226 | if (unlikely(on_rq)) { | 2226 | if (unlikely(on_rq)) { |
2227 | schedule_timeout_uninterruptible(1); | 2227 | ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ); |
2228 | |||
2229 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
2230 | schedule_hrtimeout(&to, HRTIMER_MODE_REL); | ||
2228 | continue; | 2231 | continue; |
2229 | } | 2232 | } |
2230 | 2233 | ||
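
The wait_task_inactive() hunk swaps a one-jiffy schedule_timeout_uninterruptible(1) for a high-resolution relative sleep of NSEC_PER_SEC/HZ, so the wait costs roughly one tick instead of whatever a one-jiffy timeout rounds up to. The closest userspace analogue of that pattern is a relative clock_nanosleep() of one tick's worth of nanoseconds; the HZ value below is an assumption of this sketch, not taken from the patch:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <time.h>

    #define ASSUMED_HZ 250                          /* stand-in for the kernel HZ    */
    #define TICK_NSEC  (1000000000L / ASSUMED_HZ)   /* one tick, like NSEC_PER_SEC/HZ */

    int main(void)
    {
        struct timespec to = { .tv_sec = 0, .tv_nsec = TICK_NSEC };
        int err;

        /* Relative high-resolution sleep, comparable in spirit to
         * schedule_hrtimeout(&to, HRTIMER_MODE_REL) in the hunk above. */
        err = clock_nanosleep(CLOCK_MONOTONIC, 0, &to, NULL);
        if (err)
            fprintf(stderr, "clock_nanosleep failed: %d\n", err);
        else
            printf("slept about %ld ns (one tick at HZ=%d)\n",
                   (long)TICK_NSEC, ASSUMED_HZ);
        return err ? 1 : 0;
    }
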
diff --git a/kernel/softirq.c b/kernel/softirq.c index 68eb5efec388..a33fb2911248 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -311,9 +311,21 @@ void irq_enter(void) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED | 313 | #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED |
314 | # define invoke_softirq() __do_softirq() | 314 | static inline void invoke_softirq(void) |
315 | { | ||
316 | if (!force_irqthreads) | ||
317 | __do_softirq(); | ||
318 | else | ||
319 | wakeup_softirqd(); | ||
320 | } | ||
315 | #else | 321 | #else |
316 | # define invoke_softirq() do_softirq() | 322 | static inline void invoke_softirq(void) |
323 | { | ||
324 | if (!force_irqthreads) | ||
325 | do_softirq(); | ||
326 | else | ||
327 | wakeup_softirqd(); | ||
328 | } | ||
317 | #endif | 329 | #endif |
318 | 330 | ||
319 | /* | 331 | /* |
@@ -738,7 +750,10 @@ static int run_ksoftirqd(void * __bind_cpu) | |||
738 | don't process */ | 750 | don't process */ |
739 | if (cpu_is_offline((long)__bind_cpu)) | 751 | if (cpu_is_offline((long)__bind_cpu)) |
740 | goto wait_to_die; | 752 | goto wait_to_die; |
741 | do_softirq(); | 753 | local_irq_disable(); |
754 | if (local_softirq_pending()) | ||
755 | __do_softirq(); | ||
756 | local_irq_enable(); | ||
742 | preempt_enable_no_resched(); | 757 | preempt_enable_no_resched(); |
743 | cond_resched(); | 758 | cond_resched(); |
744 | preempt_disable(); | 759 | preempt_disable(); |
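
With forced interrupt threading (the new threadirqs boot option), softirqs raised by the threaded handlers are no longer run from irq_exit(); invoke_softirq() hands them to ksoftirqd instead, and ksoftirqd itself now checks local_softirq_pending() and calls __do_softirq() with interrupts disabled around that check. A toy model of just the dispatch decision (the flag and helper names below are placeholders for this sketch, not the kernel symbols they imitate):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the force_irqthreads flag set by the threadirqs option. */
    static bool forced_threading;

    static void run_softirqs_inline(void)  { puts("run softirqs in irq_exit()"); }
    static void wake_ksoftirqd_model(void) { puts("defer softirqs to ksoftirqd"); }

    /* Same decision as the reworked invoke_softirq(): inline by default,
     * deferred to the ksoftirqd thread when handlers are force-threaded. */
    static void invoke_softirq_model(void)
    {
        if (!forced_threading)
            run_softirqs_inline();
        else
            wake_ksoftirqd_model();
    }

    int main(void)
    {
        invoke_softirq_model();          /* default: handled inline          */
        forced_threading = true;
        invoke_softirq_model();          /* threadirqs: handled by the thread */
        return 0;
    }

Keeping softirq execution out of hard interrupt context in the forced-threading case is what makes the threadirqs mode useful for latency analysis: almost all interrupt-triggered work then runs in schedulable thread context.
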