author     Arnd Bergmann <arnd@arndb.de>  2011-10-31 09:08:10 -0400
committer  Arnd Bergmann <arnd@arndb.de>  2011-10-31 09:08:10 -0400
commit     08cab72f91c8b28ffabfd143119bccdd4a115ad7 (patch)
tree       ccd5583971caecd82bf2d1e62691bf6e0362d650 /kernel
parent     86c1e5a74af11e4817ffa6d7748d9ac1353b5b53 (diff)
parent     f37a53cc5d8a8fb199e41386d125d8c2ed9e54ef (diff)

Merge branch 'dt/gic' into next/dt

Conflicts:
	arch/arm/include/asm/localtimer.h
	arch/arm/mach-msm/board-msm8x60.c
	arch/arm/mach-omap2/board-generic.c
Diffstat (limited to 'kernel')

 kernel/Makefile        |   1
 kernel/cpu_pm.c        | 233
 kernel/events/core.c   |   4
 kernel/irq/chip.c      |  64
 kernel/irq/internals.h |  19
 kernel/irq/irqdesc.c   |  32
 kernel/irq/irqdomain.c |  12
 kernel/irq/manage.c    | 218
 kernel/irq/settings.h  |   7
 kernel/power/Kconfig   |   4
 10 files changed, 564 insertions(+), 30 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index eca595e2fd52..988cb3da7031 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 000000000000..249152e15308
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+	int ret;
+
+	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+		nr_to_call, nr_calls);
+
+	return notifier_to_errno(ret);
+}
+
+/**
+ * cpu_pm_register_notifier - register a driver with cpu_pm
+ * @nb: notifier block to register
+ *
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_register.
+ */
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int ret;
+
+	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+	ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+/**
+ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
+ * @nb: notifier block to be unregistered
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+ * This function may sleep, and has the same return conditions as
+ * raw_notifier_chain_unregister.
+ */
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int ret;
+
+	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+	ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+/**
+ * cpu_pm_enter - CPU low power entry notifier
+ *
+ * Notifies listeners that a single CPU is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected CPU with interrupts disabled.  The platform
+ * is responsible for ensuring that cpu_pm_enter is not called twice on the
+ * same CPU before cpu_pm_exit is called.  Notified drivers can include the
+ * VFP co-processor, the interrupt controller and its PM extensions, and
+ * local CPU timers whose context save/restore must not be interrupted.
+ * Hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_enter(void)
+{
+	int nr_calls;
+	int ret = 0;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+	if (ret)
+		/*
+		 * Inform listeners (nr_calls - 1) about failure of CPU PM
+		 * entry who were notified earlier to prepare for it.
+		 */
+		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+/**
+ * cpu_pm_exit - CPU low power exit notifier
+ *
+ * Notifies listeners that a single CPU is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Notified drivers can include the VFP co-processor, the interrupt controller
+ * and its PM extensions, and local CPU timers whose context save/restore must
+ * not be interrupted.  Hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_exit(void)
+{
+	int ret;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+/**
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain.  Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted.
+ *
+ * Must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_enter(void)
+{
+	int nr_calls;
+	int ret = 0;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+	if (ret)
+		/*
+		 * Inform listeners (nr_calls - 1) about failure of CPU cluster
+		 * PM entry who were notified earlier to prepare for it.
+		 */
+		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+/**
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
+ *
+ * Notifies listeners that all cpus in a power domain are exiting from a
+ * low power state that may have caused some blocks in the same power
+ * domain to reset.
+ *
+ * Must be called after cpu_cluster_pm_enter has been called for the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain.  Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers whose context
+ * save/restore must not be interrupted; hence interrupts must be disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_exit(void)
+{
+	int ret;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
+#ifdef CONFIG_PM
+static int cpu_pm_suspend(void)
+{
+	int ret;
+
+	ret = cpu_pm_enter();
+	if (ret)
+		return ret;
+
+	ret = cpu_cluster_pm_enter();
+	return ret;
+}
+
+static void cpu_pm_resume(void)
+{
+	cpu_cluster_pm_exit();
+	cpu_pm_exit();
+}
+
+static struct syscore_ops cpu_pm_syscore_ops = {
+	.suspend = cpu_pm_suspend,
+	.resume = cpu_pm_resume,
+};
+
+static int cpu_pm_init(void)
+{
+	register_syscore_ops(&cpu_pm_syscore_ops);
+	return 0;
+}
+core_initcall(cpu_pm_init);
+#endif
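
To make the new interface concrete: a driver that loses hardware state across a deep CPU idle state registers a notifier and saves/restores around the transitions. A minimal sketch against the API added above — the driver name, the shadowed register, and the read_my_hw_state()/write_my_hw_state() helpers are hypothetical, not part of this commit:

    #include <linux/cpu_pm.h>
    #include <linux/notifier.h>

    static u32 my_ctrl_reg;	/* hypothetical register shadow */

    static int my_cpu_pm_notify(struct notifier_block *nb,
    			    unsigned long action, void *data)
    {
    	switch (action) {
    	case CPU_PM_ENTER:
    		my_ctrl_reg = read_my_hw_state();	/* assumed helper */
    		break;
    	case CPU_PM_ENTER_FAILED:	/* entry aborted, hw may still reset */
    	case CPU_PM_EXIT:
    		write_my_hw_state(my_ctrl_reg);		/* assumed helper */
    		break;
    	}
    	return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_pm_nb = {
    	.notifier_call = my_cpu_pm_notify,
    };

    static int __init my_driver_init(void)
    {
    	return cpu_pm_register_notifier(&my_cpu_pm_nb);
    }

Note that CPU_PM_ENTER_FAILED matters: a listener that prepared for an entry that was later aborted must undo that preparation, which is exactly why cpu_pm_enter() above re-notifies the first (nr_calls - 1) listeners on failure.
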
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0f857782d06f..fbe38f2e8edb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5758,6 +5758,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	pmu = idr_find(&pmu_idr, event->attr.type);
 	rcu_read_unlock();
 	if (pmu) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (ret)
 			pmu = ERR_PTR(ret);
@@ -5765,6 +5766,7 @@ struct pmu *perf_init_event(struct perf_event *event)
 	}
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		event->pmu = pmu;
 		ret = pmu->event_init(event);
 		if (!ret)
 			goto unlock;
@@ -5891,8 +5893,6 @@ done:
 		return ERR_PTR(err);
 	}
 
-	event->pmu = pmu;
-
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
 			jump_label_inc(&perf_sched_events);
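
The events/core.c change moves the event->pmu assignment ahead of the pmu->event_init() calls (and drops the late assignment in perf_event_alloc()), so a PMU back-end's event_init can already rely on event->pmu. A hypothetical back-end sketch of what that enables — struct my_pmu and my_pmu_validate() are illustrative, not from this commit:

    struct my_pmu {
    	struct pmu pmu;
    	/* driver-private state */
    };

    static int my_pmu_event_init(struct perf_event *event)
    {
    	/* Valid only now that the core sets event->pmu before calling us. */
    	struct my_pmu *mp = container_of(event->pmu, struct my_pmu, pmu);

    	if (event->attr.type != event->pmu->type)
    		return -ENOENT;

    	return my_pmu_validate(mp, event);	/* hypothetical helper */
    }
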
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index dc5114b4c16c..f7c543a801d9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -26,7 +26,7 @@
 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_set_chip);
 int irq_set_irq_type(unsigned int irq, unsigned int type)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(irq_set_irq_type);
 int irq_set_handler_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -119,7 +119,7 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 int irq_set_chip_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;
@@ -204,6 +204,24 @@ void irq_disable(struct irq_desc *desc)
 	}
 }
 
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_enable)
+		desc->irq_data.chip->irq_enable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_disable)
+		desc->irq_data.chip->irq_disable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
+	cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
 	if (desc->irq_data.chip->irq_mask_ack)
@@ -544,12 +562,44 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 		chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq:	the interrupt number
+ * @desc:	the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extras:
+ *
+ * action->percpu_dev_id is a pointer to percpu variables which
+ * contain the real device id for the cpu on which this handler is
+ * called
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+	irqreturn_t res;
+
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
+
+	trace_irq_handler_entry(irq, action);
+	res = action->handler(irq, dev_id);
+	trace_irq_handler_exit(irq, action, res);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
 void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 
 	if (!desc)
 		return;
@@ -593,7 +643,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return;
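
With handle_percpu_devid_irq() installed as the flow handler, the action handler no longer receives a shared dev_id: it gets the calling CPU's slot of the percpu cookie, already resolved via __this_cpu_ptr() above, so no cross-CPU locking is needed. A hypothetical handler sketch (struct my_evt and the counter are illustrative):

    struct my_evt {
    	unsigned long count;
    };

    static DEFINE_PER_CPU(struct my_evt, my_evt);

    static irqreturn_t my_ppi_handler(int irq, void *dev_id)
    {
    	struct my_evt *evt = dev_id;	/* this CPU's instance, no locking */

    	evt->count++;
    	return IRQ_HANDLED;
    }
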
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 6546431447d7..a73dd6c7372d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -71,6 +71,8 @@ extern int irq_startup(struct irq_desc *desc);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
 extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 
@@ -114,14 +116,21 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
+#define _IRQ_DESC_CHECK		(1 << 0)
+#define _IRQ_DESC_PERCPU	(1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check);
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
 
 static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, true);
+	return __irq_get_desc_lock(irq, flags, true, check);
 }
 
 static inline void
@@ -131,9 +140,9 @@ irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
 }
 
 static inline struct irq_desc *
-irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, false);
+	return __irq_get_desc_lock(irq, flags, false, check);
 }
 
 static inline void
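
The new check argument encodes what kind of descriptor the caller may operate on: 0 skips the check, IRQ_GET_DESC_CHECK_GLOBAL makes the lookup fail for per-cpu-devid descriptors, and IRQ_GET_DESC_CHECK_PERCPU fails for everything else. A sketch of the intended caller-side pattern (not itself from this patch):

    unsigned long flags;
    struct irq_desc *desc;

    /* Reject per-cpu-devid interrupts, as enable_irq() etc. now do: */
    desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
    if (!desc)
    	return -EINVAL;	/* wrong kind of irq, or no such irq */
    /* ... operate on desc ... */
    irq_put_desc_busunlock(desc, flags);
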
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 039b889ea053..1550e8447a16 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -424,11 +424,22 @@ unsigned int irq_get_next_irq(unsigned int offset)
 }
 
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (desc) {
+		if (check & _IRQ_DESC_CHECK) {
+			if ((check & _IRQ_DESC_PERCPU) &&
+			    !irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+
+			if (!(check & _IRQ_DESC_PERCPU) &&
+			    irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+		}
+
 		if (bus)
 			chip_bus_lock(desc);
 		raw_spin_lock_irqsave(&desc->lock, *flags);
@@ -443,6 +454,25 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
 	chip_bus_sync_unlock(desc);
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->percpu_enabled)
+		return -EINVAL;
+
+	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+	if (!desc->percpu_enabled)
+		return -ENOMEM;
+
+	irq_set_percpu_devid_flags(irq);
+	return 0;
+}
+
 /**
  * dynamic_irq_cleanup - cleanup a dynamically allocated irq
  * @irq:	irq number to initialize
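
irq_set_percpu_devid() is what an interrupt controller driver calls at init time to mark its banked interrupts, pairing it with the new flow handler from chip.c. A sketch of the expected setup sequence — the irq range is made up, and set_irq_flags() is ARM-specific (the ARM GIC driver does essentially this for its PPIs elsewhere in this merge):

    static void __init my_irqchip_init_percpu(struct irq_chip *chip)
    {
    	int irq;

    	for (irq = 16; irq < 32; irq++) {	/* assumed banked-irq range */
    		irq_set_percpu_devid(irq);
    		irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
    		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
    	}
    }
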
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index b57a3776de44..200ce832c585 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -20,15 +20,15 @@ static DEFINE_MUTEX(irq_domain_mutex);
 void irq_domain_add(struct irq_domain *domain)
 {
 	struct irq_data *d;
-	int hwirq;
+	int hwirq, irq;
 
 	/*
 	 * This assumes that the irq_domain owner has already allocated
 	 * the irq_descs. This block will be removed when support for dynamic
 	 * allocation of irq_descs is added to irq_domain.
 	 */
-	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
-		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
+	irq_domain_for_each_irq(domain, hwirq, irq) {
+		d = irq_get_irq_data(irq);
 		if (!d) {
 			WARN(1, "error: assigning domain to non existant irq_desc");
 			return;
@@ -54,15 +54,15 @@ void irq_domain_add(struct irq_domain *domain)
 void irq_domain_del(struct irq_domain *domain)
 {
 	struct irq_data *d;
-	int hwirq;
+	int hwirq, irq;
 
 	mutex_lock(&irq_domain_mutex);
 	list_del(&domain->list);
 	mutex_unlock(&irq_domain_mutex);
 
 	/* Clear the irq_domain assignments */
-	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
-		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
+	irq_domain_for_each_irq(domain, hwirq, irq) {
+		d = irq_get_irq_data(irq);
 		d->domain = NULL;
 	}
 }
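
The point of switching to irq_domain_for_each_irq() is that the old loop assumed hwirq numbers start at 0; the macro (added to include/linux/irqdomain.h in this series) iterates from the domain's hwirq_base instead. It expands to roughly the following — quoted from memory, so treat it as a sketch rather than the exact definition:

    #define irq_domain_for_each_irq(d, hw, irq) \
    	for (hw = (d)->hwirq_base, irq = irq_domain_to_irq(d, hw); \
    	     hw < (d)->hwirq_base + (d)->nr_irq; \
    	     hw++, irq = irq_domain_to_irq(d, hw))
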
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9b956fa20308..67ce837ae52c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -195,7 +195,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -356,7 +356,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 static int __disable_irq_nosync(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;
@@ -448,7 +448,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 void enable_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return;
@@ -467,6 +467,9 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;
 
+	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (desc->irq_data.chip->irq_set_wake)
 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 
@@ -488,7 +491,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)
@@ -529,7 +532,7 @@ EXPORT_SYMBOL(irq_set_irq_wake);
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 	int canrequest = 0;
 
 	if (!desc)
@@ -1118,6 +1121,8 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return -EINVAL;
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);
@@ -1126,7 +1131,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
- /*
+/*
  * Internal function to unregister an irqaction - used to free
  * regular and special interrupts that are part of the architecture.
  */
@@ -1224,7 +1229,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
  */
 void remove_irq(unsigned int irq, struct irqaction *act)
 {
-	__free_irq(irq, act->dev_id);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		__free_irq(irq, act->dev_id);
 }
 EXPORT_SYMBOL_GPL(remove_irq);
 
@@ -1246,7 +1254,7 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!desc)
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return;
 
 #ifdef CONFIG_SMP
@@ -1324,7 +1332,8 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;
 
-	if (!irq_settings_can_request(desc))
+	if (!irq_settings_can_request(desc) ||
+	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 
 	if (!handler) {
@@ -1409,3 +1418,194 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return !ret ? IRQC_IS_HARDIRQ : ret;
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq, unsigned int type)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	type &= IRQ_TYPE_SENSE_MASK;
+	if (type != IRQ_TYPE_NONE) {
+		int ret;
+
+		ret = __irq_set_trigger(desc, irq, type);
+
+		if (ret) {
+			WARN(1, "failed to set type for IRQ%d\n", irq);
+			goto out;
+		}
+	}
+
+	irq_percpu_enable(desc, cpu);
+out:
+	irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	irq_percpu_disable(desc, cpu);
+	irq_put_desc_unlock(desc, flags);
+}
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	unsigned long flags;
+
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+	if (!desc)
+		return NULL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	action = desc->action;
+	if (!action || action->percpu_dev_id != dev_id) {
+		WARN(1, "Trying to free already-free IRQ %d\n", irq);
+		goto bad;
+	}
+
+	if (!cpumask_empty(desc->percpu_enabled)) {
+		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+		     irq, cpumask_first(desc->percpu_enabled));
+		goto bad;
+	}
+
+	/* Found it - now remove it from the list of entries: */
+	desc->action = NULL;
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	module_put(desc->owner);
+	return action;
+
+bad:
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	return NULL;
+}
+
+/**
+ * remove_percpu_irq - free a per-cpu interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && irq_settings_is_per_cpu_devid(desc))
+		__free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ * free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove a percpu interrupt handler. The handler is removed, but
+ * the interrupt line is not disabled. This must be done on each
+ * CPU before calling this function. The function does not return
+ * until any executing interrupts for this IRQ have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return;
+
+	chip_bus_lock(desc);
+	kfree(__free_percpu_irq(irq, dev_id));
+	chip_bus_sync_unlock(desc);
+}
+
+/**
+ * setup_percpu_irq - setup a per-cpu interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int retval;
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, act);
+	chip_bus_sync_unlock(desc);
+
+	return retval;
+}
+
+/**
+ * request_percpu_irq - allocate a percpu interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources, but doesn't
+ * automatically enable the interrupt. It has to be done on each
+ * CPU using enable_percpu_irq().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		       const char *devname, void __percpu *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	int retval;
+
+	if (!dev_id)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+	if (!desc || !irq_settings_can_request(desc) ||
+	    !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = IRQF_PERCPU;
+	action->name = devname;
+	action->percpu_dev_id = dev_id;
+
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, action);
+	chip_bus_sync_unlock(desc);
+
+	if (retval)
+		kfree(action);
+
+	return retval;
+}
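
Putting the new manage.c entry points together, the intended driver lifecycle is: mark the irq per-cpu-devid (platform code), request it once with a percpu cookie, then enable and disable it on each CPU locally. A hypothetical local-timer sketch — irq number 29, the names, and the handler body are illustrative only:

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>

    struct my_clkevt {
    	unsigned long ticks;
    };

    static DEFINE_PER_CPU(struct my_clkevt, my_clkevt);

    static irqreturn_t my_timer_irq(int irq, void *dev_id)
    {
    	struct my_clkevt *evt = dev_id;	/* calling CPU's instance */

    	evt->ticks++;
    	return IRQ_HANDLED;
    }

    static int __init my_timer_init(void)
    {
    	int err;

    	/* Once, from the boot CPU: register handler + percpu cookie. */
    	err = request_percpu_irq(29, my_timer_irq, "my_timer", &my_clkevt);
    	if (err)
    		return err;

    	/* Then on each CPU, e.g. from its secondary-startup path: */
    	enable_percpu_irq(29, IRQ_TYPE_NONE);
    	return 0;
    }

Teardown mirrors this: disable_percpu_irq(29) on every CPU first, then a single free_percpu_irq(29, &my_clkevt); __free_percpu_irq() above WARNs if any CPU still has the interrupt enabled.
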
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index f1667833d444..1162f1030f18 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -13,6 +13,7 @@ enum {
 	_IRQ_MOVE_PCNTXT	= IRQ_MOVE_PCNTXT,
 	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
+	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -24,6 +25,7 @@ enum {
 #define IRQ_NOTHREAD		GOT_YOU_MORON
 #define IRQ_NOAUTOEN		GOT_YOU_MORON
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -39,6 +41,11 @@ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 	return desc->status_use_accessors & _IRQ_PER_CPU;
 }
 
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
 {
 	desc->status_use_accessors |= _IRQ_PER_CPU;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3744c594b19b..80a85971cf64 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -235,3 +235,7 @@ config PM_GENERIC_DOMAINS
 config PM_GENERIC_DOMAINS_RUNTIME
 	def_bool y
 	depends on PM_RUNTIME && PM_GENERIC_DOMAINS
+
+config CPU_PM
+	bool
+	depends on SUSPEND || CPU_IDLE