author    Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 19:14:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-02 19:14:35 -0400
commit    a4883ef6af5e513a1e8c2ab9aab721604aa3a4f5 (patch)
tree      e893f951d150c1d760f46040483193a3ac713a4e /kernel/irq
parent    ab3d681e9d41816f90836ea8fe235168d973207f (diff)
parent    d2e08473f2488d53a71c2f53455f934ec6c44c53 (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core irq changes from Ingo Molnar:
 "The main changes:

   - generic-irqchip driver additions, cleanups and fixes

   - 3 new irqchip drivers: ARMv7-M NVIC, TB10x and Marvell Orion SoCs

   - irq_get_trigger_type() simplification and cross-arch cleanup

   - various cleanups, simplifications

   - documentation updates"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  softirq: Use _RET_IP_
  genirq: Add the generic chip to the genirq docbook
  genirq: generic-chip: Export some irq_gc_ functions
  genirq: Fix can_request_irq() for IRQs without an action
  irqchip: exynos-combiner: Staticize combiner_init
  irqchip: Add support for ARMv7-M NVIC
  irqchip: Add TB10x interrupt controller driver
  irqdomain: Use irq_get_trigger_type() to get IRQ flags
  MIPS: octeon: Use irq_get_trigger_type() to get IRQ flags
  arm: orion: Use irq_get_trigger_type() to get IRQ flags
  mfd: stmpe: use irq_get_trigger_type() to get IRQ flags
  mfd: twl4030-irq: Use irq_get_trigger_type() to get IRQ flags
  gpio: mvebu: Use irq_get_trigger_type() to get IRQ flags
  genirq: Add irq_get_trigger_type() to get IRQ flags
  genirq: Irqchip: document gcflags arg of irq_alloc_domain_generic_chips
  genirq: Set irq thread to RT priority on creation
  irqchip: Add support for Marvell Orion SoCs
  genirq: Add kerneldoc for irq_disable.
  genirq: irqchip: Add mask to block out invalid irqs
  genirq: Generic chip: Add linear irq domain support
  ...
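A note on the irq_get_trigger_type() cleanup called out above: the new helper replaces the open-coded irqd_get_trigger_type(irq_get_irq_data(irq)) pattern that drivers and architectures used to read the trigger flags of a Linux irq number. A minimal before/after sketch (the surrounding driver variables are hypothetical; only the two helpers are from this series):

	unsigned int irq;	/* some driver-owned Linux irq number */
	u32 type;

	/* before this series: fetch the irq_data by hand */
	type = irqd_get_trigger_type(irq_get_irq_data(irq));

	/* after: the new helper does the irq_data lookup internally */
	type = irq_get_trigger_type(irq);

	if (type == IRQ_TYPE_NONE)
		type = IRQ_TYPE_EDGE_RISING;	/* assumed driver default */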
Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/chip.c           13
-rw-r--r--   kernel/irq/generic-chip.c  314
-rw-r--r--   kernel/irq/irqdomain.c       8
-rw-r--r--   kernel/irq/manage.c         17
4 files changed, 290 insertions, 62 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index cbd97ce0b000..a3bb14fbe5c6 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -213,6 +213,19 @@ void irq_enable(struct irq_desc *desc)
 	irq_state_clr_masked(desc);
 }
 
+/**
+ * irq_disable - Mark interrupt disabled
+ * @desc: irq descriptor which should be disabled
+ *
+ * If the chip does not implement the irq_disable callback, we
+ * use a lazy disable approach. That means we mark the interrupt
+ * disabled, but leave the hardware unmasked. That's an
+ * optimization because we avoid the hardware access for the
+ * common case where no interrupt happens after we marked it
+ * disabled. If an interrupt happens, then the interrupt flow
+ * handler masks the line at the hardware level and marks it
+ * pending.
+ */
 void irq_disable(struct irq_desc *desc)
 {
 	irq_state_set_disabled(desc);
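The lazy-disable behaviour documented in the new irq_disable() kerneldoc can be summarised in a short sketch; this is a simplified illustration of the idea (the real code lives in kernel/irq/chip.c and the flow handlers), not a verbatim copy:

	static void lazy_disable_sketch(struct irq_desc *desc)
	{
		/* Only mark the interrupt disabled, leave the hardware unmasked */
		irq_state_set_disabled(desc);

		/* Touch the hardware only if the chip insists on a real disable */
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		}
	}

	/*
	 * If a lazily disabled interrupt fires anyway, the flow handler
	 * masks the line at the hardware level and records it as pending,
	 * so the register access only happens when it is really needed.
	 */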
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index c89295a8f668..1c39eccc1eaf 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -7,6 +7,7 @@
 #include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/syscore_ops.h>
@@ -16,11 +17,6 @@
 static LIST_HEAD(gc_list);
 static DEFINE_RAW_SPINLOCK(gc_lock);
 
-static inline struct irq_chip_regs *cur_regs(struct irq_data *d)
-{
-	return &container_of(d->chip, struct irq_chip_type, chip)->regs;
-}
-
 /**
  * irq_gc_noop - NOOP function
  * @d: irq_data
@@ -39,16 +35,17 @@ void irq_gc_noop(struct irq_data *d)
 void irq_gc_mask_disable_reg(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable);
-	gc->mask_cache &= ~mask;
+	irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
+	*ct->mask_cache &= ~mask;
 	irq_gc_unlock(gc);
 }
 
 /**
- * irq_gc_mask_set_mask_bit - Mask chip via setting bit in mask register
+ * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
  * @d: irq_data
  *
  * Chip has a single mask register. Values of this register are cached
@@ -57,16 +54,18 @@ void irq_gc_mask_disable_reg(struct irq_data *d)
 void irq_gc_mask_set_bit(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	gc->mask_cache |= mask;
-	irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
+	*ct->mask_cache |= mask;
+	irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
 	irq_gc_unlock(gc);
 }
+EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
 
 /**
- * irq_gc_mask_set_mask_bit - Mask chip via clearing bit in mask register
+ * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
  * @d: irq_data
  *
  * Chip has a single mask register. Values of this register are cached
@@ -75,13 +74,15 @@ void irq_gc_mask_set_bit(struct irq_data *d)
 void irq_gc_mask_clr_bit(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	gc->mask_cache &= ~mask;
-	irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
+	*ct->mask_cache &= ~mask;
+	irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
 	irq_gc_unlock(gc);
 }
+EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
 
 /**
  * irq_gc_unmask_enable_reg - Unmask chip via enable register
@@ -93,11 +94,12 @@ void irq_gc_mask_clr_bit(struct irq_data *d)
 void irq_gc_unmask_enable_reg(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable);
-	gc->mask_cache |= mask;
+	irq_reg_writel(mask, gc->reg_base + ct->regs.enable);
+	*ct->mask_cache |= mask;
 	irq_gc_unlock(gc);
 }
 
@@ -108,12 +110,14 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
 void irq_gc_ack_set_bit(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+	irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
 	irq_gc_unlock(gc);
 }
+EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
 
 /**
  * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
@@ -122,10 +126,11 @@ void irq_gc_ack_set_bit(struct irq_data *d)
 void irq_gc_ack_clr_bit(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = ~(1 << (d->irq - gc->irq_base));
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = ~d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+	irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
 	irq_gc_unlock(gc);
 }
 
@@ -136,11 +141,12 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
 void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+	irq_reg_writel(mask, gc->reg_base + ct->regs.mask);
+	irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
 	irq_gc_unlock(gc);
 }
 
@@ -151,16 +157,18 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
 void irq_gc_eoi(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	u32 mask = d->mask;
 
 	irq_gc_lock(gc);
-	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi);
+	irq_reg_writel(mask, gc->reg_base + ct->regs.eoi);
 	irq_gc_unlock(gc);
 }
 
 /**
  * irq_gc_set_wake - Set/clr wake bit for an interrupt
  * @d: irq_data
+ * @on: Indicates whether the wake bit should be set or cleared
  *
  * For chips where the wake from suspend functionality is not
  * configured in a separate register and the wakeup active state is
@@ -169,7 +177,7 @@ void irq_gc_eoi(struct irq_data *d)
 int irq_gc_set_wake(struct irq_data *d, unsigned int on)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - gc->irq_base);
+	u32 mask = d->mask;
 
 	if (!(mask & gc->wake_enabled))
 		return -EINVAL;
@@ -183,6 +191,19 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on)
 	return 0;
 }
 
+static void
+irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+		      int num_ct, unsigned int irq_base,
+		      void __iomem *reg_base, irq_flow_handler_t handler)
+{
+	raw_spin_lock_init(&gc->lock);
+	gc->num_ct = num_ct;
+	gc->irq_base = irq_base;
+	gc->reg_base = reg_base;
+	gc->chip_types->chip.name = name;
+	gc->chip_types->handler = handler;
+}
+
 /**
  * irq_alloc_generic_chip - Allocate a generic chip and initialize it
  * @name: Name of the irq chip
@@ -203,23 +224,185 @@ irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
 
 	gc = kzalloc(sz, GFP_KERNEL);
 	if (gc) {
-		raw_spin_lock_init(&gc->lock);
-		gc->num_ct = num_ct;
-		gc->irq_base = irq_base;
-		gc->reg_base = reg_base;
-		gc->chip_types->chip.name = name;
-		gc->chip_types->handler = handler;
+		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
+				      handler);
 	}
 	return gc;
 }
 EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
 
+static void
+irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
+{
+	struct irq_chip_type *ct = gc->chip_types;
+	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
+	int i;
+
+	for (i = 0; i < gc->num_ct; i++) {
+		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
+			mskptr = &ct[i].mask_cache_priv;
+			mskreg = ct[i].regs.mask;
+		}
+		ct[i].mask_cache = mskptr;
+		if (flags & IRQ_GC_INIT_MASK_CACHE)
+			*mskptr = irq_reg_readl(gc->reg_base + mskreg);
+	}
+}
+
+/**
+ * irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
+ * @d:			irq domain for which to allocate chips
+ * @irqs_per_chip:	Number of interrupts each chip handles
+ * @num_ct:		Number of irq_chip_type instances associated with this
+ * @name:		Name of the irq chip
+ * @handler:		Default flow handler associated with these chips
+ * @clr:		IRQ_* bits to clear in the mapping function
+ * @set:		IRQ_* bits to set in the mapping function
+ * @gcflags:		Generic chip specific setup flags
+ */
+int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
+				   int num_ct, const char *name,
+				   irq_flow_handler_t handler,
+				   unsigned int clr, unsigned int set,
+				   enum irq_gc_flags gcflags)
+{
+	struct irq_domain_chip_generic *dgc;
+	struct irq_chip_generic *gc;
+	int numchips, sz, i;
+	unsigned long flags;
+	void *tmp;
+
+	if (d->gc)
+		return -EBUSY;
+
+	if (d->revmap_type != IRQ_DOMAIN_MAP_LINEAR)
+		return -EINVAL;
+
+	numchips = d->revmap_data.linear.size / irqs_per_chip;
+	if (!numchips)
+		return -EINVAL;
+
+	/* Allocate a pointer, generic chip and chiptypes for each chip */
+	sz = sizeof(*dgc) + numchips * sizeof(gc);
+	sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
+
+	tmp = dgc = kzalloc(sz, GFP_KERNEL);
+	if (!dgc)
+		return -ENOMEM;
+	dgc->irqs_per_chip = irqs_per_chip;
+	dgc->num_chips = numchips;
+	dgc->irq_flags_to_set = set;
+	dgc->irq_flags_to_clear = clr;
+	dgc->gc_flags = gcflags;
+	d->gc = dgc;
+
+	/* Calc pointer to the first generic chip */
+	tmp += sizeof(*dgc) + numchips * sizeof(gc);
+	for (i = 0; i < numchips; i++) {
+		/* Store the pointer to the generic chip */
+		dgc->gc[i] = gc = tmp;
+		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
+				      NULL, handler);
+		gc->domain = d;
+		raw_spin_lock_irqsave(&gc_lock, flags);
+		list_add_tail(&gc->list, &gc_list);
+		raw_spin_unlock_irqrestore(&gc_lock, flags);
+		/* Calc pointer to the next generic chip */
+		tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
+
+/**
+ * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
+ * @d:			irq domain pointer
+ * @hw_irq:		Hardware interrupt number
+ */
+struct irq_chip_generic *
+irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
+{
+	struct irq_domain_chip_generic *dgc = d->gc;
+	int idx;
+
+	if (!dgc)
+		return NULL;
+	idx = hw_irq / dgc->irqs_per_chip;
+	if (idx >= dgc->num_chips)
+		return NULL;
+	return dgc->gc[idx];
+}
+EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
+
 /*
  * Separate lockdep class for interrupt chip which can nest irq_desc
  * lock.
  */
 static struct lock_class_key irq_nested_lock_class;
 
+/*
+ * irq_map_generic_chip - Map a generic chip for an irq domain
+ */
+static int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
+				irq_hw_number_t hw_irq)
+{
+	struct irq_data *data = irq_get_irq_data(virq);
+	struct irq_domain_chip_generic *dgc = d->gc;
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	struct irq_chip *chip;
+	unsigned long flags;
+	int idx;
+
+	if (!d->gc)
+		return -ENODEV;
+
+	idx = hw_irq / dgc->irqs_per_chip;
+	if (idx >= dgc->num_chips)
+		return -EINVAL;
+	gc = dgc->gc[idx];
+
+	idx = hw_irq % dgc->irqs_per_chip;
+
+	if (test_bit(idx, &gc->unused))
+		return -ENOTSUPP;
+
+	if (test_bit(idx, &gc->installed))
+		return -EBUSY;
+
+	ct = gc->chip_types;
+	chip = &ct->chip;
+
+	/* We only init the cache for the first mapping of a generic chip */
+	if (!gc->installed) {
+		raw_spin_lock_irqsave(&gc->lock, flags);
+		irq_gc_init_mask_cache(gc, dgc->gc_flags);
+		raw_spin_unlock_irqrestore(&gc->lock, flags);
+	}
+
+	/* Mark the interrupt as installed */
+	set_bit(idx, &gc->installed);
+
+	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
+		irq_set_lockdep_class(virq, &irq_nested_lock_class);
+
+	if (chip->irq_calc_mask)
+		chip->irq_calc_mask(data);
+	else
+		data->mask = 1 << idx;
+
+	irq_set_chip_and_handler(virq, chip, ct->handler);
+	irq_set_chip_data(virq, gc);
+	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
+	return 0;
+}
+
+struct irq_domain_ops irq_generic_chip_ops = {
+	.map	= irq_map_generic_chip,
+	.xlate	= irq_domain_xlate_onetwocell,
+};
+EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
+
 /**
  * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
  * @gc: Generic irq chip holding all data
@@ -237,15 +420,14 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
 			    unsigned int set)
 {
 	struct irq_chip_type *ct = gc->chip_types;
+	struct irq_chip *chip = &ct->chip;
 	unsigned int i;
 
 	raw_spin_lock(&gc_lock);
 	list_add_tail(&gc->list, &gc_list);
 	raw_spin_unlock(&gc_lock);
 
-	/* Init mask cache ? */
-	if (flags & IRQ_GC_INIT_MASK_CACHE)
-		gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
+	irq_gc_init_mask_cache(gc, flags);
 
 	for (i = gc->irq_base; msk; msk >>= 1, i++) {
 		if (!(msk & 0x01))
@@ -254,7 +436,15 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
 		if (flags & IRQ_GC_INIT_NESTED_LOCK)
 			irq_set_lockdep_class(i, &irq_nested_lock_class);
 
-		irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+		if (!(flags & IRQ_GC_NO_MASK)) {
+			struct irq_data *d = irq_get_irq_data(i);
+
+			if (chip->irq_calc_mask)
+				chip->irq_calc_mask(d);
+			else
+				d->mask = 1 << (i - gc->irq_base);
+		}
+		irq_set_chip_and_handler(i, chip, ct->handler);
 		irq_set_chip_data(i, gc);
 		irq_modify_status(i, clr, set);
 	}
@@ -265,7 +455,7 @@ EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
 /**
  * irq_setup_alt_chip - Switch to alternative chip
  * @d: irq_data for this interrupt
- * @type Flow type to be initialized
+ * @type: Flow type to be initialized
  *
  * Only to be called from chip->irq_set_type() callbacks.
  */
@@ -317,6 +507,24 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
 }
 EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
 
+static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
+{
+	unsigned int virq;
+
+	if (!gc->domain)
+		return irq_get_irq_data(gc->irq_base);
+
+	/*
+	 * We don't know which of the irqs has been actually
+	 * installed. Use the first one.
+	 */
+	if (!gc->installed)
+		return NULL;
+
+	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
+	return virq ? irq_get_irq_data(virq) : NULL;
+}
+
 #ifdef CONFIG_PM
 static int irq_gc_suspend(void)
 {
@@ -325,8 +533,12 @@ static int irq_gc_suspend(void)
 	list_for_each_entry(gc, &gc_list, list) {
 		struct irq_chip_type *ct = gc->chip_types;
 
-		if (ct->chip.irq_suspend)
-			ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base));
+		if (ct->chip.irq_suspend) {
+			struct irq_data *data = irq_gc_get_irq_data(gc);
+
+			if (data)
+				ct->chip.irq_suspend(data);
+		}
 	}
 	return 0;
 }
@@ -338,8 +550,12 @@ static void irq_gc_resume(void)
 	list_for_each_entry(gc, &gc_list, list) {
 		struct irq_chip_type *ct = gc->chip_types;
 
-		if (ct->chip.irq_resume)
-			ct->chip.irq_resume(irq_get_irq_data(gc->irq_base));
+		if (ct->chip.irq_resume) {
+			struct irq_data *data = irq_gc_get_irq_data(gc);
+
+			if (data)
+				ct->chip.irq_resume(data);
+		}
 	}
 }
 #else
@@ -354,8 +570,12 @@ static void irq_gc_shutdown(void)
 	list_for_each_entry(gc, &gc_list, list) {
 		struct irq_chip_type *ct = gc->chip_types;
 
-		if (ct->chip.irq_pm_shutdown)
-			ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base));
+		if (ct->chip.irq_pm_shutdown) {
+			struct irq_data *data = irq_gc_get_irq_data(gc);
+
+			if (data)
+				ct->chip.irq_pm_shutdown(data);
+		}
 	}
 }
 
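The domain support added to the generic chip code above is meant to be used from an irqchip driver's init path roughly as below. This is a hedged sketch modelled on the drivers added elsewhere in this series (e.g. the Orion irqchip); the chip name, register offset and device-tree node are made-up placeholders:

	static int __init myintc_init(struct device_node *np,
				      void __iomem *base, int nr_irqs)
	{
		struct irq_domain *domain;
		struct irq_chip_generic *gc;
		int ret;

		domain = irq_domain_add_linear(np, nr_irqs,
					       &irq_generic_chip_ops, NULL);
		if (!domain)
			return -ENOMEM;

		/* One generic chip with one chip type covering all nr_irqs lines */
		ret = irq_alloc_domain_generic_chips(domain, nr_irqs, 1, "myintc",
						     handle_level_irq, 0, 0,
						     IRQ_GC_INIT_MASK_CACHE);
		if (ret)
			return ret;

		gc = irq_get_domain_generic_chip(domain, 0);
		gc->reg_base = base;
		gc->chip_types[0].regs.mask = 0x04;	/* made-up register offset */
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
		return 0;
	}

With this scheme irq_map_generic_chip() fills in d->mask and installs the flow handler when a hardware irq is first mapped, so the driver no longer has to preallocate a linear irq_base range itself.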
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 54a4d5223238..1ed8dff17eb9 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -16,12 +16,6 @@
 #include <linux/smp.h>
 #include <linux/fs.h>
 
-#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
-				 * ie. legacy 8259, gets irqs 1..15 */
-#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
-
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
 
@@ -698,7 +692,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
 	/* Set type if specified and different than the current one */
 	if (type != IRQ_TYPE_NONE &&
-	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+	    type != irq_get_trigger_type(virq))
 		irq_set_irq_type(virq, type);
 	return virq;
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fa17855ca65a..514bcfd855a8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -555,9 +555,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 		return 0;
 
 	if (irq_settings_can_request(desc)) {
-		if (desc->action)
-			if (irqflags & desc->action->flags & IRQF_SHARED)
-				canrequest =1;
+		if (!desc->action ||
+		    irqflags & desc->action->flags & IRQF_SHARED)
+			canrequest = 1;
 	}
 	irq_put_desc_unlock(desc, flags);
 	return canrequest;
@@ -840,9 +840,6 @@ static void irq_thread_dtor(struct callback_head *unused)
 static int irq_thread(void *data)
 {
 	struct callback_head on_exit_work;
-	static const struct sched_param param = {
-		.sched_priority = MAX_USER_RT_PRIO/2,
-	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
@@ -854,8 +851,6 @@ static int irq_thread(void *data)
 	else
 		handler_fn = irq_thread_fn;
 
-	sched_setscheduler(current, SCHED_FIFO, &param);
-
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, false);
 
@@ -950,6 +945,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	if (new->thread_fn && !nested) {
 		struct task_struct *t;
+		static const struct sched_param param = {
+			.sched_priority = MAX_USER_RT_PRIO/2,
+		};
 
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 				   new->name);
@@ -957,6 +955,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			ret = PTR_ERR(t);
 			goto out_mput;
 		}
+
+		sched_setscheduler(t, SCHED_FIFO, &param);
+
 		/*
 		 * We keep the reference to the task struct even if
 		 * the thread dies to avoid that the interrupt code