author		Aurelien Jacquiot <a-jacquiot@ti.com>	2011-10-04 11:06:27 -0400
committer	Mark Salter <msalter@redhat.com>	2011-10-06 19:47:54 -0400
commit		ec500af3059b474df35418c41c684c1cde830c81 (patch)
tree		fca5ee52137efe4fc9d9c07ddce4f4e4ea52ba16 /arch/c6x
parent		546a39546c64ad7e73796c5508ef5487af42cae2 (diff)
C6X: interrupt handling
Original port to early 2.6 kernel using TI COFF toolchain.
Brought up to date by Mark Salter <msalter@redhat.com>
Signed-off-by: Aurelien Jacquiot <a-jacquiot@ti.com>
Signed-off-by: Mark Salter <msalter@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/c6x')
-rw-r--r--	arch/c6x/include/asm/hardirq.h	 20
-rw-r--r--	arch/c6x/include/asm/irq.h	302
-rw-r--r--	arch/c6x/include/asm/irqflags.h	 72
-rw-r--r--	arch/c6x/include/asm/megamod-pic.h	  9
-rw-r--r--	arch/c6x/kernel/irq.c	728
-rw-r--r--	arch/c6x/platforms/megamod-pic.c	349
6 files changed, 1480 insertions, 0 deletions
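For orientation, the pieces added below are reached by ordinary drivers through the standard OF interrupt path: irq_of_parse_and_map() uses a host's xlate()/map() callbacks to turn a device-tree specifier into a Linux virq, which is then requested as usual. A minimal, hypothetical consumer sketch (my_probe/my_isr are illustrative names, not part of this patch):

#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_probe(struct device_node *np)
{
	/* xlate() + irq_create_mapping() happen under the hood */
	int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -EINVAL;

	return request_irq(virq, my_isr, 0, "mydev", NULL);
}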
diff --git a/arch/c6x/include/asm/hardirq.h b/arch/c6x/include/asm/hardirq.h
new file mode 100644
index 000000000000..9621954f98f4
--- /dev/null
+++ b/arch/c6x/include/asm/hardirq.h
@@ -0,0 +1,20 @@
1 | /* | ||
2 | * Port on Texas Instruments TMS320C6x architecture | ||
3 | * | ||
4 | * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated | ||
5 | * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_C6X_HARDIRQ_H | ||
13 | #define _ASM_C6X_HARDIRQ_H | ||
14 | |||
15 | extern void ack_bad_irq(int irq); | ||
16 | #define ack_bad_irq ack_bad_irq | ||
17 | |||
18 | #include <asm-generic/hardirq.h> | ||
19 | |||
20 | #endif /* _ASM_C6X_HARDIRQ_H */ | ||
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
new file mode 100644
index 000000000000..a6ae3c9d9c40
--- /dev/null
+++ b/arch/c6x/include/asm/irq.h
@@ -0,0 +1,302 @@
1 | /* | ||
2 | * Port on Texas Instruments TMS320C6x architecture | ||
3 | * | ||
4 | * Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated | ||
5 | * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) | ||
6 | * | ||
7 | * Large parts taken directly from powerpc. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | #ifndef _ASM_C6X_IRQ_H | ||
14 | #define _ASM_C6X_IRQ_H | ||
15 | |||
16 | #include <linux/threads.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/radix-tree.h> | ||
19 | #include <asm/percpu.h> | ||
20 | |||
21 | #define irq_canonicalize(irq) (irq) | ||
22 | |||
23 | /* | ||
24 | * The C64X+ core has 16 IRQ vectors. One each is used by Reset and NMI. Two | ||
25 | * are reserved. The remaining 12 vectors are used to route SoC interrupts. | ||
26 | * These interrupt vectors are prioritized with IRQ 4 having the highest | ||
27 | * priority and IRQ 15 having the lowest. | ||
28 | * | ||
29 | * The C64x+ megamodule provides a PIC which combines SoC IRQ sources into a | ||
30 | * single core IRQ vector. There are four combined sources, each of which | ||
31 | * feed into one of the 12 general interrupt vectors. The remaining 8 vectors | ||
32 | * can each route a single SoC interrupt directly. | ||
33 | */ | ||
34 | #define NR_PRIORITY_IRQS 16 | ||
35 | |||
36 | #define NR_IRQS_LEGACY NR_PRIORITY_IRQS | ||
37 | |||
38 | /* Total number of virq in the platform */ | ||
39 | #define NR_IRQS 256 | ||
40 | |||
41 | /* This number is used when no interrupt has been assigned */ | ||
42 | #define NO_IRQ 0 | ||
43 | |||
44 | /* This type is the placeholder for a hardware interrupt number. It has to | ||
45 | * be big enough to enclose whatever representation is used by a given | ||
46 | * platform. | ||
47 | */ | ||
48 | typedef unsigned long irq_hw_number_t; | ||
49 | |||
50 | /* Interrupt controller "host" data structure. This could be defined as a | ||
51 | * irq domain controller. That is, it handles the mapping between hardware | ||
52 | * and virtual interrupt numbers for a given interrupt domain. The host | ||
53 | * structure is generally created by the PIC code for a given PIC instance | ||
54 | * (though a host can cover more than one PIC if they have a flat number | ||
55 | * model). It's the host callbacks that are responsible for setting the | ||
56 | * irq_chip on a given irq_desc after it's been mapped. | ||
57 | * | ||
58 | * The host code and data structures are fairly agnostic to the fact that | ||
59 | * we use an open firmware device-tree. We do have references to struct | ||
60 | * device_node in two places: in irq_find_host() to find the host matching | ||
61 | * a given interrupt controller node, and of course as an argument to its | ||
62 | * counterpart host->ops->match() callback. However, those are treated as | ||
63 | * generic pointers by the core and the fact that it's actually a device-node | ||
64 | * pointer is purely a convention between callers and implementation. This | ||
65 | * code could thus be used on other architectures by replacing those two | ||
66 | * by some sort of arch-specific void * "token" used to identify interrupt | ||
67 | * controllers. | ||
68 | */ | ||
69 | struct irq_host; | ||
70 | struct radix_tree_root; | ||
71 | struct device_node; | ||
72 | |||
73 | /* Functions below are provided by the host and called whenever a new mapping | ||
74 | * is created or an old mapping is disposed. The host can then proceed to | ||
75 | * whatever internal data structures management is required. It also needs | ||
76 | * to setup the irq_desc when returning from map(). | ||
77 | */ | ||
78 | struct irq_host_ops { | ||
79 | /* Match an interrupt controller device node to a host, returns | ||
80 | * 1 on a match | ||
81 | */ | ||
82 | int (*match)(struct irq_host *h, struct device_node *node); | ||
83 | |||
84 | /* Create or update a mapping between a virtual irq number and a hw | ||
85 | * irq number. This is called only once for a given mapping. | ||
86 | */ | ||
87 | int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw); | ||
88 | |||
89 | /* Dispose of such a mapping */ | ||
90 | void (*unmap)(struct irq_host *h, unsigned int virq); | ||
91 | |||
92 | /* Translate device-tree interrupt specifier from raw format coming | ||
93 | * from the firmware to an irq_hw_number_t (interrupt line number) and | ||
94 | * type (sense) that can be passed to set_irq_type(). In the absence | ||
95 | * of this callback, irq_create_of_mapping() and irq_of_parse_and_map() | ||
96 | * will return the hw number in the first cell and IRQ_TYPE_NONE for | ||
97 | * the type (which amounts to keeping whatever default value the | ||
98 | * interrupt controller has for that line) | ||
99 | */ | ||
100 | int (*xlate)(struct irq_host *h, struct device_node *ctrler, | ||
101 | const u32 *intspec, unsigned int intsize, | ||
102 | irq_hw_number_t *out_hwirq, unsigned int *out_type); | ||
103 | }; | ||
104 | |||
105 | struct irq_host { | ||
106 | struct list_head link; | ||
107 | |||
108 | /* type of reverse mapping technique */ | ||
109 | unsigned int revmap_type; | ||
110 | #define IRQ_HOST_MAP_PRIORITY 0 /* core priority irqs, get irqs 1..15 */ | ||
111 | #define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */ | ||
112 | #define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */ | ||
113 | #define IRQ_HOST_MAP_TREE 3 /* radix tree */ | ||
114 | union { | ||
115 | struct { | ||
116 | unsigned int size; | ||
117 | unsigned int *revmap; | ||
118 | } linear; | ||
119 | struct radix_tree_root tree; | ||
120 | } revmap_data; | ||
121 | struct irq_host_ops *ops; | ||
122 | void *host_data; | ||
123 | irq_hw_number_t inval_irq; | ||
124 | |||
125 | /* Optional device node pointer */ | ||
126 | struct device_node *of_node; | ||
127 | }; | ||
128 | |||
129 | struct irq_data; | ||
130 | extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); | ||
131 | extern irq_hw_number_t virq_to_hw(unsigned int virq); | ||
132 | extern bool virq_is_host(unsigned int virq, struct irq_host *host); | ||
133 | |||
134 | /** | ||
135 | * irq_alloc_host - Allocate a new irq_host data structure | ||
136 | * @of_node: optional device-tree node of the interrupt controller | ||
137 | * @revmap_type: type of reverse mapping to use | ||
138 | * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map | ||
139 | * @ops: map/unmap host callbacks | ||
140 | * @inval_irq: provide a hw number in that host space that is always invalid | ||
141 | * | ||
142 | * Allocates and initializes an irq_host structure. Note that in the case of | ||
143 | * IRQ_HOST_MAP_PRIORITY, the map() callback will be called before this returns | ||
144 | * for all priority interrupts except 0 (which is always the invalid irq for | ||
145 | * the priority controller). For an IRQ_HOST_MAP_LINEAR host, the map is allocated by | ||
146 | * this call as well. For an IRQ_HOST_MAP_TREE host, the radix tree will be allocated | ||
147 | * later during boot automatically (the reverse mapping will use the slow path | ||
148 | * until that happens). | ||
149 | */ | ||
150 | extern struct irq_host *irq_alloc_host(struct device_node *of_node, | ||
151 | unsigned int revmap_type, | ||
152 | unsigned int revmap_arg, | ||
153 | struct irq_host_ops *ops, | ||
154 | irq_hw_number_t inval_irq); | ||
155 | |||
156 | |||
157 | /** | ||
158 | * irq_find_host - Locates a host for a given device node | ||
159 | * @node: device-tree node of the interrupt controller | ||
160 | */ | ||
161 | extern struct irq_host *irq_find_host(struct device_node *node); | ||
162 | |||
163 | |||
164 | /** | ||
165 | * irq_set_default_host - Set a "default" host | ||
166 | * @host: default host pointer | ||
167 | * | ||
168 | * For convenience, it's possible to set a "default" host that will be used | ||
169 | * whenever NULL is passed to irq_create_mapping(). It makes life easier for | ||
170 | * platforms that want to manipulate a few hard coded interrupt numbers that | ||
171 | * aren't properly represented in the device-tree. | ||
172 | */ | ||
173 | extern void irq_set_default_host(struct irq_host *host); | ||
174 | |||
175 | |||
176 | /** | ||
177 | * irq_set_virq_count - Set the maximum number of virt irqs | ||
178 | * @count: number of linux virtual irqs, capped with NR_IRQS | ||
179 | * | ||
180 | * This is mainly for use by platforms like iSeries that want to program | ||
181 | * the virtual irq number in the controller to avoid the reverse mapping | ||
182 | */ | ||
183 | extern void irq_set_virq_count(unsigned int count); | ||
184 | |||
185 | |||
186 | /** | ||
187 | * irq_create_mapping - Map a hardware interrupt into linux virq space | ||
188 | * @host: host owning this hardware interrupt or NULL for default host | ||
189 | * @hwirq: hardware irq number in that host space | ||
190 | * | ||
191 | * Only one mapping per hardware interrupt is permitted. Returns a linux | ||
192 | * virq number. | ||
193 | * If the sense/trigger is to be specified, set_irq_type() should be called | ||
194 | * on the number returned from that call. | ||
195 | */ | ||
196 | extern unsigned int irq_create_mapping(struct irq_host *host, | ||
197 | irq_hw_number_t hwirq); | ||
198 | |||
199 | |||
200 | /** | ||
201 | * irq_dispose_mapping - Unmap an interrupt | ||
202 | * @virq: linux virq number of the interrupt to unmap | ||
203 | */ | ||
204 | extern void irq_dispose_mapping(unsigned int virq); | ||
205 | |||
206 | /** | ||
207 | * irq_find_mapping - Find a linux virq from an hw irq number. | ||
208 | * @host: host owning this hardware interrupt | ||
209 | * @hwirq: hardware irq number in that host space | ||
210 | * | ||
211 | * This is a slow path, for use by generic code. It's expected that an | ||
212 | * irq controller implementation directly calls the appropriate low level | ||
213 | * mapping function. | ||
214 | */ | ||
215 | extern unsigned int irq_find_mapping(struct irq_host *host, | ||
216 | irq_hw_number_t hwirq); | ||
217 | |||
218 | /** | ||
219 | * irq_create_direct_mapping - Allocate a virq for direct mapping | ||
220 | * @host: host to allocate the virq for or NULL for default host | ||
221 | * | ||
222 | * This routine is used for irq controllers which can choose the hardware | ||
223 | * interrupt numbers they generate. In such a case it's simplest to use | ||
224 | * the linux virq as the hardware interrupt number. | ||
225 | */ | ||
226 | extern unsigned int irq_create_direct_mapping(struct irq_host *host); | ||
227 | |||
228 | /** | ||
229 | * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping. | ||
230 | * @host: host owning this hardware interrupt | ||
231 | * @virq: linux irq number | ||
232 | * @hwirq: hardware irq number in that host space | ||
233 | * | ||
234 | * This is for use by irq controllers that use a radix tree reverse | ||
235 | * mapping for fast lookup. | ||
236 | */ | ||
237 | extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | ||
238 | irq_hw_number_t hwirq); | ||
239 | |||
240 | /** | ||
241 | * irq_radix_revmap_lookup - Find a linux virq from a hw irq number. | ||
242 | * @host: host owning this hardware interrupt | ||
243 | * @hwirq: hardware irq number in that host space | ||
244 | * | ||
245 | * This is a fast path, for use by irq controller code that uses radix tree | ||
246 | * revmaps | ||
247 | */ | ||
248 | extern unsigned int irq_radix_revmap_lookup(struct irq_host *host, | ||
249 | irq_hw_number_t hwirq); | ||
250 | |||
251 | /** | ||
252 | * irq_linear_revmap - Find a linux virq from a hw irq number. | ||
253 | * @host: host owning this hardware interrupt | ||
254 | * @hwirq: hardware irq number in that host space | ||
255 | * | ||
256 | * This is a fast path, for use by irq controller code that uses linear | ||
257 | * revmaps. It falls back to the slow path if the revmap doesn't exist | ||
258 | * yet and will create the revmap entry with appropriate locking | ||
259 | */ | ||
260 | |||
261 | extern unsigned int irq_linear_revmap(struct irq_host *host, | ||
262 | irq_hw_number_t hwirq); | ||
263 | |||
264 | |||
265 | |||
266 | /** | ||
267 | * irq_alloc_virt - Allocate virtual irq numbers | ||
268 | * @host: host owning these new virtual irqs | ||
269 | * @count: number of consecutive numbers to allocate | ||
270 | * @hint: pass a hint number, the allocator will try to use a 1:1 mapping | ||
271 | * | ||
272 | * This is a low level function that is used internally by irq_create_mapping() | ||
273 | * and that can be used by some irq controller implementations for things | ||
274 | * like allocating ranges of numbers for MSIs. The revmaps are left untouched. | ||
275 | */ | ||
276 | extern unsigned int irq_alloc_virt(struct irq_host *host, | ||
277 | unsigned int count, | ||
278 | unsigned int hint); | ||
279 | |||
280 | /** | ||
281 | * irq_free_virt - Free virtual irq numbers | ||
282 | * @virq: virtual irq number of the first interrupt to free | ||
283 | * @count: number of interrupts to free | ||
284 | * | ||
285 | * This function is the opposite of irq_alloc_virt. It will not clear reverse | ||
286 | * maps; this should be done beforehand by unmapping the interrupt. In fact, | ||
287 | * all interrupts covered by the range being freed should have been unmapped | ||
288 | * prior to calling this. | ||
289 | */ | ||
290 | extern void irq_free_virt(unsigned int virq, unsigned int count); | ||
291 | |||
292 | extern void __init init_pic_c64xplus(void); | ||
293 | |||
294 | extern void init_IRQ(void); | ||
295 | |||
296 | struct pt_regs; | ||
297 | |||
298 | extern asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs); | ||
299 | |||
300 | extern unsigned long irq_err_count; | ||
301 | |||
302 | #endif /* _ASM_C6X_IRQ_H */ | ||
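To make the irq_host API above concrete, here is a small, hypothetical interrupt-controller sketch that registers a linear-revmap host and lets map() attach a chip and handler to each virq. The my_pic_* names are illustrative, the chip callbacks are stubs, and the usual <linux/irq.h> and <linux/of.h> declarations are assumed to be in scope; treat it as a usage outline, not code from this patch:

static void my_pic_mask(struct irq_data *d) { /* mask hwirq irqd_to_hwirq(d) */ }
static void my_pic_unmask(struct irq_data *d) { /* unmask it again */ }

static struct irq_chip my_pic_chip = {
	.name		= "my-pic",
	.irq_mask	= my_pic_mask,
	.irq_unmask	= my_pic_unmask,
};

/* Called once per hwirq, when irq_create_mapping() first sees it */
static int my_pic_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops my_pic_host_ops = {
	.map = my_pic_map,	/* default match()/xlate() behaviour is fine here */
};

static void __init my_pic_init(struct device_node *np, void *priv)
{
	/* 64 hw sources, linear reverse map, hwirq 0 reserved as "invalid" */
	struct irq_host *host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
					       &my_pic_host_ops, 0);
	if (host)
		host->host_data = priv;
}

This mirrors what megamod-pic.c (further down in this patch) does for the real C64x+ megamodule controller.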
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h
new file mode 100644
index 000000000000..cf78e09e18c3
--- /dev/null
+++ b/arch/c6x/include/asm/irqflags.h
@@ -0,0 +1,72 @@
1 | /* | ||
2 | * C6X IRQ flag handling | ||
3 | * | ||
4 | * Copyright (C) 2010 Texas Instruments Incorporated | ||
5 | * Written by Mark Salter (msalter@redhat.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public Licence | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the Licence, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #ifndef _ASM_IRQFLAGS_H | ||
14 | #define _ASM_IRQFLAGS_H | ||
15 | |||
16 | #ifndef __ASSEMBLY__ | ||
17 | |||
18 | /* read interrupt enabled status */ | ||
19 | static inline unsigned long arch_local_save_flags(void) | ||
20 | { | ||
21 | unsigned long flags; | ||
22 | |||
23 | asm volatile (" mvc .s2 CSR,%0\n" : "=b"(flags)); | ||
24 | return flags; | ||
25 | } | ||
26 | |||
27 | /* set interrupt enabled status */ | ||
28 | static inline void arch_local_irq_restore(unsigned long flags) | ||
29 | { | ||
30 | asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); | ||
31 | } | ||
32 | |||
33 | /* unconditionally enable interrupts */ | ||
34 | static inline void arch_local_irq_enable(void) | ||
35 | { | ||
36 | unsigned long flags = arch_local_save_flags(); | ||
37 | flags |= 1; | ||
38 | arch_local_irq_restore(flags); | ||
39 | } | ||
40 | |||
41 | /* unconditionally disable interrupts */ | ||
42 | static inline void arch_local_irq_disable(void) | ||
43 | { | ||
44 | unsigned long flags = arch_local_save_flags(); | ||
45 | flags &= ~1; | ||
46 | arch_local_irq_restore(flags); | ||
47 | } | ||
48 | |||
49 | /* get status and disable interrupts */ | ||
50 | static inline unsigned long arch_local_irq_save(void) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | |||
54 | flags = arch_local_save_flags(); | ||
55 | arch_local_irq_restore(flags & ~1); | ||
56 | return flags; | ||
57 | } | ||
58 | |||
59 | /* test flags */ | ||
60 | static inline int arch_irqs_disabled_flags(unsigned long flags) | ||
61 | { | ||
62 | return (flags & 1) == 0; | ||
63 | } | ||
64 | |||
65 | /* test hardware interrupt enable bit */ | ||
66 | static inline int arch_irqs_disabled(void) | ||
67 | { | ||
68 | return arch_irqs_disabled_flags(arch_local_save_flags()); | ||
69 | } | ||
70 | |||
71 | #endif /* __ASSEMBLY__ */ | ||
72 | #endif /* _ASM_IRQFLAGS_H */ | ||
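Bit 0 of the C6x CSR is the global interrupt enable (GIE), which is why every helper above only sets or clears bit 0 of the saved register value. As a quick illustration of the pattern the generic local_irq_save()/local_irq_restore() wrappers end up executing on this port (a sketch; the critical-section body is a placeholder):

static void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* copy CSR, then clear GIE (bit 0) */
	/* ... work that must not be interrupted ... */
	arch_local_irq_restore(flags);	/* write the old CSR back; interrupts
					 * come back on only if GIE was set */
}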
diff --git a/arch/c6x/include/asm/megamod-pic.h b/arch/c6x/include/asm/megamod-pic.h
new file mode 100644
index 000000000000..eca0a8678034
--- /dev/null
+++ b/arch/c6x/include/asm/megamod-pic.h
@@ -0,0 +1,9 @@
1 | #ifndef _C6X_MEGAMOD_PIC_H | ||
2 | #define _C6X_MEGAMOD_PIC_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | extern void __init megamod_pic_init(void); | ||
7 | |||
8 | #endif /* __KERNEL__ */ | ||
9 | #endif /* _C6X_MEGAMOD_PIC_H */ | ||
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
new file mode 100644
index 000000000000..0929e4b2b244
--- /dev/null
+++ b/arch/c6x/kernel/irq.c
@@ -0,0 +1,728 @@
1 | /* | ||
2 | * Copyright (C) 2011 Texas Instruments Incorporated | ||
3 | * | ||
4 | * This borrows heavily from powerpc version, which is: | ||
5 | * | ||
6 | * Derived from arch/i386/kernel/irq.c | ||
7 | * Copyright (C) 1992 Linus Torvalds | ||
8 | * Adapted from arch/i386 by Gary Thomas | ||
9 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | ||
10 | * Updated and modified by Cort Dougan <cort@fsmlabs.com> | ||
11 | * Copyright (C) 1996-2001 Cort Dougan | ||
12 | * Adapted for Power Macintosh by Paul Mackerras | ||
13 | * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | */ | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/seq_file.h> | ||
22 | #include <linux/radix-tree.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/of.h> | ||
25 | #include <linux/of_irq.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/kernel_stat.h> | ||
28 | |||
29 | #include <asm/megamod-pic.h> | ||
30 | |||
31 | unsigned long irq_err_count; | ||
32 | |||
33 | static DEFINE_RAW_SPINLOCK(core_irq_lock); | ||
34 | |||
35 | static void mask_core_irq(struct irq_data *data) | ||
36 | { | ||
37 | unsigned int prio = data->irq; | ||
38 | |||
39 | BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS); | ||
40 | |||
41 | raw_spin_lock(&core_irq_lock); | ||
42 | and_creg(IER, ~(1 << prio)); | ||
43 | raw_spin_unlock(&core_irq_lock); | ||
44 | } | ||
45 | |||
46 | static void unmask_core_irq(struct irq_data *data) | ||
47 | { | ||
48 | unsigned int prio = data->irq; | ||
49 | |||
50 | raw_spin_lock(&core_irq_lock); | ||
51 | or_creg(IER, 1 << prio); | ||
52 | raw_spin_unlock(&core_irq_lock); | ||
53 | } | ||
54 | |||
55 | static struct irq_chip core_chip = { | ||
56 | .name = "core", | ||
57 | .irq_mask = mask_core_irq, | ||
58 | .irq_unmask = unmask_core_irq, | ||
59 | }; | ||
60 | |||
61 | asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs) | ||
62 | { | ||
63 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
64 | |||
65 | irq_enter(); | ||
66 | |||
67 | BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS); | ||
68 | |||
69 | generic_handle_irq(prio); | ||
70 | |||
71 | irq_exit(); | ||
72 | |||
73 | set_irq_regs(old_regs); | ||
74 | } | ||
75 | |||
76 | static struct irq_host *core_host; | ||
77 | |||
78 | static int core_host_map(struct irq_host *h, unsigned int virq, | ||
79 | irq_hw_number_t hw) | ||
80 | { | ||
81 | if (hw < 4 || hw >= NR_PRIORITY_IRQS) | ||
82 | return -EINVAL; | ||
83 | |||
84 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
85 | irq_set_chip_and_handler(virq, &core_chip, handle_level_irq); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static struct irq_host_ops core_host_ops = { | ||
90 | .map = core_host_map, | ||
91 | }; | ||
92 | |||
93 | void __init init_IRQ(void) | ||
94 | { | ||
95 | struct device_node *np; | ||
96 | |||
97 | /* Mask all priority IRQs */ | ||
98 | and_creg(IER, ~0xfff0); | ||
99 | |||
100 | np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic"); | ||
101 | if (np != NULL) { | ||
102 | /* create the core host */ | ||
103 | core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0, | ||
104 | &core_host_ops, 0); | ||
105 | if (core_host) | ||
106 | irq_set_default_host(core_host); | ||
107 | of_node_put(np); | ||
108 | } | ||
109 | |||
110 | printk(KERN_INFO "Core interrupt controller initialized\n"); | ||
111 | |||
112 | /* now we're ready for other SoC controllers */ | ||
113 | megamod_pic_init(); | ||
114 | |||
115 | /* Clear all general IRQ flags */ | ||
116 | set_creg(ICR, 0xfff0); | ||
117 | } | ||
118 | |||
119 | void ack_bad_irq(int irq) | ||
120 | { | ||
121 | printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq); | ||
122 | irq_err_count++; | ||
123 | } | ||
124 | |||
125 | int arch_show_interrupts(struct seq_file *p, int prec) | ||
126 | { | ||
127 | seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * IRQ controller and virtual interrupts | ||
133 | */ | ||
134 | |||
135 | /* The main irq map itself is an array of NR_IRQS entries containing the | ||
136 | * associated host and irq number. An entry with a host of NULL is free. | ||
137 | * An entry can be allocated if it's free; the allocator then sets | ||
138 | * hwirq first to the host's invalid irq number and then fills in the host. | ||
139 | */ | ||
140 | struct irq_map_entry { | ||
141 | irq_hw_number_t hwirq; | ||
142 | struct irq_host *host; | ||
143 | }; | ||
144 | |||
145 | static LIST_HEAD(irq_hosts); | ||
146 | static DEFINE_RAW_SPINLOCK(irq_big_lock); | ||
147 | static DEFINE_MUTEX(revmap_trees_mutex); | ||
148 | static struct irq_map_entry irq_map[NR_IRQS]; | ||
149 | static unsigned int irq_virq_count = NR_IRQS; | ||
150 | static struct irq_host *irq_default_host; | ||
151 | |||
152 | irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | ||
153 | { | ||
154 | return irq_map[d->irq].hwirq; | ||
155 | } | ||
156 | EXPORT_SYMBOL_GPL(irqd_to_hwirq); | ||
157 | |||
158 | irq_hw_number_t virq_to_hw(unsigned int virq) | ||
159 | { | ||
160 | return irq_map[virq].hwirq; | ||
161 | } | ||
162 | EXPORT_SYMBOL_GPL(virq_to_hw); | ||
163 | |||
164 | bool virq_is_host(unsigned int virq, struct irq_host *host) | ||
165 | { | ||
166 | return irq_map[virq].host == host; | ||
167 | } | ||
168 | EXPORT_SYMBOL_GPL(virq_is_host); | ||
169 | |||
170 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) | ||
171 | { | ||
172 | return h->of_node != NULL && h->of_node == np; | ||
173 | } | ||
174 | |||
175 | struct irq_host *irq_alloc_host(struct device_node *of_node, | ||
176 | unsigned int revmap_type, | ||
177 | unsigned int revmap_arg, | ||
178 | struct irq_host_ops *ops, | ||
179 | irq_hw_number_t inval_irq) | ||
180 | { | ||
181 | struct irq_host *host; | ||
182 | unsigned int size = sizeof(struct irq_host); | ||
183 | unsigned int i; | ||
184 | unsigned int *rmap; | ||
185 | unsigned long flags; | ||
186 | |||
187 | /* Allocate structure and revmap table if using linear mapping */ | ||
188 | if (revmap_type == IRQ_HOST_MAP_LINEAR) | ||
189 | size += revmap_arg * sizeof(unsigned int); | ||
190 | host = kzalloc(size, GFP_KERNEL); | ||
191 | if (host == NULL) | ||
192 | return NULL; | ||
193 | |||
194 | /* Fill structure */ | ||
195 | host->revmap_type = revmap_type; | ||
196 | host->inval_irq = inval_irq; | ||
197 | host->ops = ops; | ||
198 | host->of_node = of_node_get(of_node); | ||
199 | |||
200 | if (host->ops->match == NULL) | ||
201 | host->ops->match = default_irq_host_match; | ||
202 | |||
203 | raw_spin_lock_irqsave(&irq_big_lock, flags); | ||
204 | |||
205 | /* Check for the priority controller. */ | ||
206 | if (revmap_type == IRQ_HOST_MAP_PRIORITY) { | ||
207 | if (irq_map[0].host != NULL) { | ||
208 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | ||
209 | of_node_put(host->of_node); | ||
210 | kfree(host); | ||
211 | return NULL; | ||
212 | } | ||
213 | irq_map[0].host = host; | ||
214 | } | ||
215 | |||
216 | list_add(&host->link, &irq_hosts); | ||
217 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | ||
218 | |||
219 | /* Additional setups per revmap type */ | ||
220 | switch (revmap_type) { | ||
221 | case IRQ_HOST_MAP_PRIORITY: | ||
222 | /* 0 is always the invalid number for priority */ | ||
223 | host->inval_irq = 0; | ||
224 | /* setup us as the host for all priority interrupts */ | ||
225 | for (i = 1; i < NR_PRIORITY_IRQS; i++) { | ||
226 | irq_map[i].hwirq = i; | ||
227 | smp_wmb(); | ||
228 | irq_map[i].host = host; | ||
229 | smp_wmb(); | ||
230 | |||
231 | ops->map(host, i, i); | ||
232 | } | ||
233 | break; | ||
234 | case IRQ_HOST_MAP_LINEAR: | ||
235 | rmap = (unsigned int *)(host + 1); | ||
236 | for (i = 0; i < revmap_arg; i++) | ||
237 | rmap[i] = NO_IRQ; | ||
238 | host->revmap_data.linear.size = revmap_arg; | ||
239 | smp_wmb(); | ||
240 | host->revmap_data.linear.revmap = rmap; | ||
241 | break; | ||
242 | case IRQ_HOST_MAP_TREE: | ||
243 | INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); | ||
244 | break; | ||
245 | default: | ||
246 | break; | ||
247 | } | ||
248 | |||
249 | pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); | ||
250 | |||
251 | return host; | ||
252 | } | ||
253 | |||
254 | struct irq_host *irq_find_host(struct device_node *node) | ||
255 | { | ||
256 | struct irq_host *h, *found = NULL; | ||
257 | unsigned long flags; | ||
258 | |||
259 | /* We might want to match the legacy controller last since | ||
260 | * it might potentially be set to match all interrupts in | ||
261 | * the absence of a device node. This isn't a problem so far | ||
262 | * yet though... | ||
263 | */ | ||
264 | raw_spin_lock_irqsave(&irq_big_lock, flags); | ||
265 | list_for_each_entry(h, &irq_hosts, link) | ||
266 | if (h->ops->match(h, node)) { | ||
267 | found = h; | ||
268 | break; | ||
269 | } | ||
270 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | ||
271 | return found; | ||
272 | } | ||
273 | EXPORT_SYMBOL_GPL(irq_find_host); | ||
274 | |||
275 | void irq_set_default_host(struct irq_host *host) | ||
276 | { | ||
277 | pr_debug("irq: Default host set to @0x%p\n", host); | ||
278 | |||
279 | irq_default_host = host; | ||
280 | } | ||
281 | |||
282 | void irq_set_virq_count(unsigned int count) | ||
283 | { | ||
284 | pr_debug("irq: Trying to set virq count to %d\n", count); | ||
285 | |||
286 | BUG_ON(count < NR_PRIORITY_IRQS); | ||
287 | if (count < NR_IRQS) | ||
288 | irq_virq_count = count; | ||
289 | } | ||
290 | |||
291 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, | ||
292 | irq_hw_number_t hwirq) | ||
293 | { | ||
294 | int res; | ||
295 | |||
296 | res = irq_alloc_desc_at(virq, 0); | ||
297 | if (res != virq) { | ||
298 | pr_debug("irq: -> allocating desc failed\n"); | ||
299 | goto error; | ||
300 | } | ||
301 | |||
302 | /* map it */ | ||
303 | smp_wmb(); | ||
304 | irq_map[virq].hwirq = hwirq; | ||
305 | smp_mb(); | ||
306 | |||
307 | if (host->ops->map(host, virq, hwirq)) { | ||
308 | pr_debug("irq: -> mapping failed, freeing\n"); | ||
309 | goto errdesc; | ||
310 | } | ||
311 | |||
312 | irq_clear_status_flags(virq, IRQ_NOREQUEST); | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | errdesc: | ||
317 | irq_free_descs(virq, 1); | ||
318 | error: | ||
319 | irq_free_virt(virq, 1); | ||
320 | return -1; | ||
321 | } | ||
322 | |||
323 | unsigned int irq_create_direct_mapping(struct irq_host *host) | ||
324 | { | ||
325 | unsigned int virq; | ||
326 | |||
327 | if (host == NULL) | ||
328 | host = irq_default_host; | ||
329 | |||
330 | BUG_ON(host == NULL); | ||
331 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); | ||
332 | |||
333 | virq = irq_alloc_virt(host, 1, 0); | ||
334 | if (virq == NO_IRQ) { | ||
335 | pr_debug("irq: create_direct virq allocation failed\n"); | ||
336 | return NO_IRQ; | ||
337 | } | ||
338 | |||
339 | pr_debug("irq: create_direct obtained virq %d\n", virq); | ||
340 | |||
341 | if (irq_setup_virq(host, virq, virq)) | ||
342 | return NO_IRQ; | ||
343 | |||
344 | return virq; | ||
345 | } | ||
346 | |||
347 | unsigned int irq_create_mapping(struct irq_host *host, | ||
348 | irq_hw_number_t hwirq) | ||
349 | { | ||
350 | unsigned int virq, hint; | ||
351 | |||
352 | pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); | ||
353 | |||
354 | /* Look for default host if necessary */ | ||
355 | if (host == NULL) | ||
356 | host = irq_default_host; | ||
357 | if (host == NULL) { | ||
358 | printk(KERN_WARNING "irq_create_mapping called for" | ||
359 | " NULL host, hwirq=%lx\n", hwirq); | ||
360 | WARN_ON(1); | ||
361 | return NO_IRQ; | ||
362 | } | ||
363 | pr_debug("irq: -> using host @%p\n", host); | ||
364 | |||
365 | /* Check if mapping already exists */ | ||
366 | virq = irq_find_mapping(host, hwirq); | ||
367 | if (virq != NO_IRQ) { | ||
368 | pr_debug("irq: -> existing mapping on virq %d\n", virq); | ||
369 | return virq; | ||
370 | } | ||
371 | |||
372 | /* Allocate a virtual interrupt number */ | ||
373 | hint = hwirq % irq_virq_count; | ||
374 | virq = irq_alloc_virt(host, 1, hint); | ||
375 | if (virq == NO_IRQ) { | ||
376 | pr_debug("irq: -> virq allocation failed\n"); | ||
377 | return NO_IRQ; | ||
378 | } | ||
379 | |||
380 | if (irq_setup_virq(host, virq, hwirq)) | ||
381 | return NO_IRQ; | ||
382 | |||
383 | pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n", | ||
384 | hwirq, host->of_node ? host->of_node->full_name : "null", virq); | ||
385 | |||
386 | return virq; | ||
387 | } | ||
388 | EXPORT_SYMBOL_GPL(irq_create_mapping); | ||
389 | |||
390 | unsigned int irq_create_of_mapping(struct device_node *controller, | ||
391 | const u32 *intspec, unsigned int intsize) | ||
392 | { | ||
393 | struct irq_host *host; | ||
394 | irq_hw_number_t hwirq; | ||
395 | unsigned int type = IRQ_TYPE_NONE; | ||
396 | unsigned int virq; | ||
397 | |||
398 | if (controller == NULL) | ||
399 | host = irq_default_host; | ||
400 | else | ||
401 | host = irq_find_host(controller); | ||
402 | if (host == NULL) { | ||
403 | printk(KERN_WARNING "irq: no irq host found for %s !\n", | ||
404 | controller->full_name); | ||
405 | return NO_IRQ; | ||
406 | } | ||
407 | |||
408 | /* If host has no translation, then we assume interrupt line */ | ||
409 | if (host->ops->xlate == NULL) | ||
410 | hwirq = intspec[0]; | ||
411 | else { | ||
412 | if (host->ops->xlate(host, controller, intspec, intsize, | ||
413 | &hwirq, &type)) | ||
414 | return NO_IRQ; | ||
415 | } | ||
416 | |||
417 | /* Create mapping */ | ||
418 | virq = irq_create_mapping(host, hwirq); | ||
419 | if (virq == NO_IRQ) | ||
420 | return virq; | ||
421 | |||
422 | /* Set type if specified and different than the current one */ | ||
423 | if (type != IRQ_TYPE_NONE && | ||
424 | type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) | ||
425 | irq_set_irq_type(virq, type); | ||
426 | return virq; | ||
427 | } | ||
428 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | ||
429 | |||
430 | void irq_dispose_mapping(unsigned int virq) | ||
431 | { | ||
432 | struct irq_host *host; | ||
433 | irq_hw_number_t hwirq; | ||
434 | |||
435 | if (virq == NO_IRQ) | ||
436 | return; | ||
437 | |||
438 | /* Never unmap priority interrupts */ | ||
439 | if (virq < NR_PRIORITY_IRQS) | ||
440 | return; | ||
441 | |||
442 | host = irq_map[virq].host; | ||
443 | if (WARN_ON(host == NULL)) | ||
444 | return; | ||
445 | |||
446 | irq_set_status_flags(virq, IRQ_NOREQUEST); | ||
447 | |||
448 | /* remove chip and handler */ | ||
449 | irq_set_chip_and_handler(virq, NULL, NULL); | ||
450 | |||
451 | /* Make sure it's completed */ | ||
452 | synchronize_irq(virq); | ||
453 | |||
454 | /* Tell the PIC about it */ | ||
455 | if (host->ops->unmap) | ||
456 | host->ops->unmap(host, virq); | ||
457 | smp_mb(); | ||
458 | |||
459 | /* Clear reverse map */ | ||
460 | hwirq = irq_map[virq].hwirq; | ||
461 | switch (host->revmap_type) { | ||
462 | case IRQ_HOST_MAP_LINEAR: | ||
463 | if (hwirq < host->revmap_data.linear.size) | ||
464 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; | ||
465 | break; | ||
466 | case IRQ_HOST_MAP_TREE: | ||
467 | mutex_lock(&revmap_trees_mutex); | ||
468 | radix_tree_delete(&host->revmap_data.tree, hwirq); | ||
469 | mutex_unlock(&revmap_trees_mutex); | ||
470 | break; | ||
471 | } | ||
472 | |||
473 | /* Destroy map */ | ||
474 | smp_mb(); | ||
475 | irq_map[virq].hwirq = host->inval_irq; | ||
476 | |||
477 | irq_free_descs(virq, 1); | ||
478 | /* Free it */ | ||
479 | irq_free_virt(virq, 1); | ||
480 | } | ||
481 | EXPORT_SYMBOL_GPL(irq_dispose_mapping); | ||
482 | |||
483 | unsigned int irq_find_mapping(struct irq_host *host, | ||
484 | irq_hw_number_t hwirq) | ||
485 | { | ||
486 | unsigned int i; | ||
487 | unsigned int hint = hwirq % irq_virq_count; | ||
488 | |||
489 | /* Look for default host if necessary */ | ||
490 | if (host == NULL) | ||
491 | host = irq_default_host; | ||
492 | if (host == NULL) | ||
493 | return NO_IRQ; | ||
494 | |||
495 | /* Slow path does a linear search of the map */ | ||
496 | i = hint; | ||
497 | do { | ||
498 | if (irq_map[i].host == host && | ||
499 | irq_map[i].hwirq == hwirq) | ||
500 | return i; | ||
501 | i++; | ||
502 | if (i >= irq_virq_count) | ||
503 | i = 4; | ||
504 | } while (i != hint); | ||
505 | return NO_IRQ; | ||
506 | } | ||
507 | EXPORT_SYMBOL_GPL(irq_find_mapping); | ||
508 | |||
509 | unsigned int irq_radix_revmap_lookup(struct irq_host *host, | ||
510 | irq_hw_number_t hwirq) | ||
511 | { | ||
512 | struct irq_map_entry *ptr; | ||
513 | unsigned int virq; | ||
514 | |||
515 | if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) | ||
516 | return irq_find_mapping(host, hwirq); | ||
517 | |||
518 | /* | ||
519 | * The ptr returned references the static global irq_map. | ||
520 | * but freeing an irq can delete nodes along the path to | ||
521 | * do the lookup via call_rcu. | ||
522 | */ | ||
523 | rcu_read_lock(); | ||
524 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); | ||
525 | rcu_read_unlock(); | ||
526 | |||
527 | /* | ||
528 | * If found in radix tree, then fine. | ||
529 | * Else fallback to linear lookup - this should not happen in practice | ||
530 | * as it means that we failed to insert the node in the radix tree. | ||
531 | */ | ||
532 | if (ptr) | ||
533 | virq = ptr - irq_map; | ||
534 | else | ||
535 | virq = irq_find_mapping(host, hwirq); | ||
536 | |||
537 | return virq; | ||
538 | } | ||
539 | |||
540 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | ||
541 | irq_hw_number_t hwirq) | ||
542 | { | ||
543 | if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) | ||
544 | return; | ||
545 | |||
546 | if (virq != NO_IRQ) { | ||
547 | mutex_lock(&revmap_trees_mutex); | ||
548 | radix_tree_insert(&host->revmap_data.tree, hwirq, | ||
549 | &irq_map[virq]); | ||
550 | mutex_unlock(&revmap_trees_mutex); | ||
551 | } | ||
552 | } | ||
553 | |||
554 | unsigned int irq_linear_revmap(struct irq_host *host, | ||
555 | irq_hw_number_t hwirq) | ||
556 | { | ||
557 | unsigned int *revmap; | ||
558 | |||
559 | if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) | ||
560 | return irq_find_mapping(host, hwirq); | ||
561 | |||
562 | /* Check revmap bounds */ | ||
563 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | ||
564 | return irq_find_mapping(host, hwirq); | ||
565 | |||
566 | /* Check if revmap was allocated */ | ||
567 | revmap = host->revmap_data.linear.revmap; | ||
568 | if (unlikely(revmap == NULL)) | ||
569 | return irq_find_mapping(host, hwirq); | ||
570 | |||
571 | /* Fill up revmap with slow path if no mapping found */ | ||
572 | if (unlikely(revmap[hwirq] == NO_IRQ)) | ||
573 | revmap[hwirq] = irq_find_mapping(host, hwirq); | ||
574 | |||
575 | return revmap[hwirq]; | ||
576 | } | ||
577 | |||
578 | unsigned int irq_alloc_virt(struct irq_host *host, | ||
579 | unsigned int count, | ||
580 | unsigned int hint) | ||
581 | { | ||
582 | unsigned long flags; | ||
583 | unsigned int i, j, found = NO_IRQ; | ||
584 | |||
585 | if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS)) | ||
586 | return NO_IRQ; | ||
587 | |||
588 | raw_spin_lock_irqsave(&irq_big_lock, flags); | ||
589 | |||
590 | /* Use hint for 1 interrupt if any */ | ||
591 | if (count == 1 && hint >= NR_PRIORITY_IRQS && | ||
592 | hint < irq_virq_count && irq_map[hint].host == NULL) { | ||
593 | found = hint; | ||
594 | goto hint_found; | ||
595 | } | ||
596 | |||
597 | /* Look for count consecutive numbers in the allocatable | ||
598 | * (non-legacy) space | ||
599 | */ | ||
600 | for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) { | ||
601 | if (irq_map[i].host != NULL) | ||
602 | j = 0; | ||
603 | else | ||
604 | j++; | ||
605 | |||
606 | if (j == count) { | ||
607 | found = i - count + 1; | ||
608 | break; | ||
609 | } | ||
610 | } | ||
611 | if (found == NO_IRQ) { | ||
612 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | ||
613 | return NO_IRQ; | ||
614 | } | ||
615 | hint_found: | ||
616 | for (i = found; i < (found + count); i++) { | ||
617 | irq_map[i].hwirq = host->inval_irq; | ||
618 | smp_wmb(); | ||
619 | irq_map[i].host = host; | ||
620 | } | ||
621 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | ||
622 | return found; | ||
623 | } | ||
624 | |||
625 | void irq_free_virt(unsigned int virq, unsigned int count) | ||
626 | { | ||
627 | unsigned long flags; | ||
628 | unsigned int i; | ||
629 | |||
630 | WARN_ON(virq < NR_PRIORITY_IRQS); | ||
631 | WARN_ON(count == 0 || (virq + count) > irq_virq_count); | ||
632 | |||
633 | if (virq < NR_PRIORITY_IRQS) { | ||
634 | if (virq + count < NR_PRIORITY_IRQS) | ||
635 | return; | ||
636 | count -= NR_PRIORITY_IRQS - virq; | ||
637 | virq = NR_PRIORITY_IRQS; | ||
638 | } | ||
639 | |||
640 | if (count > irq_virq_count || virq > irq_virq_count - count) { | ||
641 | if (virq > irq_virq_count) | ||
642 | return; | ||
643 | count = irq_virq_count - virq; | ||
644 | } | ||
645 | |||
646 | raw_spin_lock_irqsave(&irq_big_lock, flags); | ||
647 | for (i = virq; i < (virq + count); i++) { | ||
648 | struct irq_host *host; | ||
649 | |||
650 | host = irq_map[i].host; | ||
651 | irq_map[i].hwirq = host->inval_irq; | ||
652 | smp_wmb(); | ||
653 | irq_map[i].host = NULL; | ||
654 | } | ||
655 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | ||
656 | } | ||
657 | |||
658 | #ifdef CONFIG_VIRQ_DEBUG | ||
659 | static int virq_debug_show(struct seq_file *m, void *private) | ||
660 | { | ||
661 | unsigned long flags; | ||
662 | struct irq_desc *desc; | ||
663 | const char *p; | ||
664 | static const char none[] = "none"; | ||
665 | void *data; | ||
666 | int i; | ||
667 | |||
668 | seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", | ||
669 | "chip name", "chip data", "host name"); | ||
670 | |||
671 | for (i = 1; i < nr_irqs; i++) { | ||
672 | desc = irq_to_desc(i); | ||
673 | if (!desc) | ||
674 | continue; | ||
675 | |||
676 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
677 | |||
678 | if (desc->action && desc->action->handler) { | ||
679 | struct irq_chip *chip; | ||
680 | |||
681 | seq_printf(m, "%5d ", i); | ||
682 | seq_printf(m, "0x%05lx ", irq_map[i].hwirq); | ||
683 | |||
684 | chip = irq_desc_get_chip(desc); | ||
685 | if (chip && chip->name) | ||
686 | p = chip->name; | ||
687 | else | ||
688 | p = none; | ||
689 | seq_printf(m, "%-15s ", p); | ||
690 | |||
691 | data = irq_desc_get_chip_data(desc); | ||
692 | seq_printf(m, "0x%16p ", data); | ||
693 | |||
694 | if (irq_map[i].host && irq_map[i].host->of_node) | ||
695 | p = irq_map[i].host->of_node->full_name; | ||
696 | else | ||
697 | p = none; | ||
698 | seq_printf(m, "%s\n", p); | ||
699 | } | ||
700 | |||
701 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
702 | } | ||
703 | |||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | static int virq_debug_open(struct inode *inode, struct file *file) | ||
708 | { | ||
709 | return single_open(file, virq_debug_show, inode->i_private); | ||
710 | } | ||
711 | |||
712 | static const struct file_operations virq_debug_fops = { | ||
713 | .open = virq_debug_open, | ||
714 | .read = seq_read, | ||
715 | .llseek = seq_lseek, | ||
716 | .release = single_release, | ||
717 | }; | ||
718 | |||
719 | static int __init irq_debugfs_init(void) | ||
720 | { | ||
721 | if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, | ||
722 | NULL, &virq_debug_fops) == NULL) | ||
723 | return -ENOMEM; | ||
724 | |||
725 | return 0; | ||
726 | } | ||
727 | device_initcall(irq_debugfs_init); | ||
728 | #endif /* CONFIG_VIRQ_DEBUG */ | ||
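The split between the forward map (the static irq_map[] above, indexed by virq) and the per-host reverse maps is easiest to see in a short usage sketch against a linear-revmap host; the host pointer h and hwirq 17 are purely illustrative:

static void __init example_mapping_roundtrip(struct irq_host *h)
{
	/* hwirq -> virq: allocates a descriptor, records host/hwirq in
	 * irq_map[], and invokes the host's ->map() callback exactly once.
	 */
	unsigned int virq = irq_create_mapping(h, 17);

	/* Fast reverse lookup used on the dispatch path; the linear revmap
	 * is filled lazily, via the slow irq_find_mapping() search the
	 * first time a given hwirq is looked up.
	 */
	WARN_ON(irq_linear_revmap(h, 17) != virq);

	/* virq -> hwirq, e.g. for an irq_chip that needs the raw event number */
	WARN_ON(virq_to_hw(virq) != 17);
}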
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
new file mode 100644
index 000000000000..7c37a947fb1c
--- /dev/null
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -0,0 +1,349 @@
1 | /* | ||
2 | * Support for C64x+ Megamodule Interrupt Controller | ||
3 | * | ||
4 | * Copyright (C) 2010, 2011 Texas Instruments Incorporated | ||
5 | * Contributed by: Mark Salter <msalter@redhat.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <asm/soc.h> | ||
19 | #include <asm/megamod-pic.h> | ||
20 | |||
21 | #define NR_COMBINERS 4 | ||
22 | #define NR_MUX_OUTPUTS 12 | ||
23 | |||
24 | #define IRQ_UNMAPPED 0xffff | ||
25 | |||
26 | /* | ||
27 | * Megamodule Interrupt Controller register layout | ||
28 | */ | ||
29 | struct megamod_regs { | ||
30 | u32 evtflag[8]; | ||
31 | u32 evtset[8]; | ||
32 | u32 evtclr[8]; | ||
33 | u32 reserved0[8]; | ||
34 | u32 evtmask[8]; | ||
35 | u32 mevtflag[8]; | ||
36 | u32 expmask[8]; | ||
37 | u32 mexpflag[8]; | ||
38 | u32 intmux_unused; | ||
39 | u32 intmux[7]; | ||
40 | u32 reserved1[8]; | ||
41 | u32 aegmux[2]; | ||
42 | u32 reserved2[14]; | ||
43 | u32 intxstat; | ||
44 | u32 intxclr; | ||
45 | u32 intdmask; | ||
46 | u32 reserved3[13]; | ||
47 | u32 evtasrt; | ||
48 | }; | ||
49 | |||
50 | struct megamod_pic { | ||
51 | struct irq_host *irqhost; | ||
52 | struct megamod_regs __iomem *regs; | ||
53 | raw_spinlock_t lock; | ||
54 | |||
55 | /* hw mux mapping */ | ||
56 | unsigned int output_to_irq[NR_MUX_OUTPUTS]; | ||
57 | }; | ||
58 | |||
59 | static struct megamod_pic *mm_pic; | ||
60 | |||
61 | struct megamod_cascade_data { | ||
62 | struct megamod_pic *pic; | ||
63 | int index; | ||
64 | }; | ||
65 | |||
66 | static struct megamod_cascade_data cascade_data[NR_COMBINERS]; | ||
67 | |||
68 | static void mask_megamod(struct irq_data *data) | ||
69 | { | ||
70 | struct megamod_pic *pic = irq_data_get_irq_chip_data(data); | ||
71 | irq_hw_number_t src = irqd_to_hwirq(data); | ||
72 | u32 __iomem *evtmask = &pic->regs->evtmask[src / 32]; | ||
73 | |||
74 | raw_spin_lock(&pic->lock); | ||
75 | soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask); | ||
76 | raw_spin_unlock(&pic->lock); | ||
77 | } | ||
78 | |||
79 | static void unmask_megamod(struct irq_data *data) | ||
80 | { | ||
81 | struct megamod_pic *pic = irq_data_get_irq_chip_data(data); | ||
82 | irq_hw_number_t src = irqd_to_hwirq(data); | ||
83 | u32 __iomem *evtmask = &pic->regs->evtmask[src / 32]; | ||
84 | |||
85 | raw_spin_lock(&pic->lock); | ||
86 | soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask); | ||
87 | raw_spin_unlock(&pic->lock); | ||
88 | } | ||
89 | |||
90 | static struct irq_chip megamod_chip = { | ||
91 | .name = "megamod", | ||
92 | .irq_mask = mask_megamod, | ||
93 | .irq_unmask = unmask_megamod, | ||
94 | }; | ||
95 | |||
96 | static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc) | ||
97 | { | ||
98 | struct megamod_cascade_data *cascade; | ||
99 | struct megamod_pic *pic; | ||
100 | u32 events; | ||
101 | int n, idx; | ||
102 | |||
103 | cascade = irq_desc_get_handler_data(desc); | ||
104 | |||
105 | pic = cascade->pic; | ||
106 | idx = cascade->index; | ||
107 | |||
108 | while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) { | ||
109 | n = __ffs(events); | ||
110 | |||
111 | irq = irq_linear_revmap(pic->irqhost, idx * 32 + n); | ||
112 | |||
113 | soc_writel(1 << n, &pic->regs->evtclr[idx]); | ||
114 | |||
115 | generic_handle_irq(irq); | ||
116 | } | ||
117 | } | ||
118 | |||
119 | static int megamod_map(struct irq_host *h, unsigned int virq, | ||
120 | irq_hw_number_t hw) | ||
121 | { | ||
122 | struct megamod_pic *pic = h->host_data; | ||
123 | int i; | ||
124 | |||
125 | /* We shouldn't see a hwirq which is muxed to core controller */ | ||
126 | for (i = 0; i < NR_MUX_OUTPUTS; i++) | ||
127 | if (pic->output_to_irq[i] == hw) | ||
128 | return -1; | ||
129 | |||
130 | irq_set_chip_data(virq, pic); | ||
131 | irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq); | ||
132 | |||
133 | /* Set default irq type */ | ||
134 | irq_set_irq_type(virq, IRQ_TYPE_NONE); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int megamod_xlate(struct irq_host *h, struct device_node *ct, | ||
140 | const u32 *intspec, unsigned int intsize, | ||
141 | irq_hw_number_t *out_hwirq, unsigned int *out_type) | ||
142 | |||
143 | { | ||
144 | /* megamod intspecs must have 1 cell */ | ||
145 | BUG_ON(intsize != 1); | ||
146 | *out_hwirq = intspec[0]; | ||
147 | *out_type = IRQ_TYPE_NONE; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static struct irq_host_ops megamod_host_ops = { | ||
152 | .map = megamod_map, | ||
153 | .xlate = megamod_xlate, | ||
154 | }; | ||
155 | |||
156 | static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output) | ||
157 | { | ||
158 | int index, offset; | ||
159 | u32 val; | ||
160 | |||
161 | if (src < 0 || src >= (NR_COMBINERS * 32)) { | ||
162 | pic->output_to_irq[output] = IRQ_UNMAPPED; | ||
163 | return; | ||
164 | } | ||
165 | |||
166 | /* four mappings per mux register */ | ||
167 | index = output / 4; | ||
168 | offset = (output & 3) * 8; | ||
169 | |||
170 | val = soc_readl(&pic->regs->intmux[index]); | ||
171 | val &= ~(0xff << offset); | ||
172 | val |= src << offset; | ||
173 | soc_writel(val, &pic->regs->intmux[index]); | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Parse the MUX mapping, if one exists. | ||
178 | * | ||
179 | * The MUX map is an array of up to 12 cells; one for each usable core priority | ||
180 | * interrupt. The value of a given cell is the megamodule interrupt source | ||
181 | * which is to be MUXed to the output corresponding to the cell position | ||
182 | * within the array. The first cell in the array corresponds to priority | ||
183 | * 4 and the last (12th) cell corresponds to priority 15. The allowed | ||
184 | * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt | ||
185 | * sources (0 - 3) are not allowed to be mapped through this property. They | ||
186 | * are handled through the "interrupts" property. This allows us to use a | ||
187 | * value of zero as a "do not map" placeholder. | ||
188 | */ | ||
189 | static void __init parse_priority_map(struct megamod_pic *pic, | ||
190 | int *mapping, int size) | ||
191 | { | ||
192 | struct device_node *np = pic->irqhost->of_node; | ||
193 | const __be32 *map; | ||
194 | int i, maplen; | ||
195 | u32 val; | ||
196 | |||
197 | map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen); | ||
198 | if (map) { | ||
199 | maplen /= 4; | ||
200 | if (maplen > size) | ||
201 | maplen = size; | ||
202 | |||
203 | for (i = 0; i < maplen; i++) { | ||
204 | val = be32_to_cpup(map); | ||
205 | if (val && val >= 4) | ||
206 | mapping[i] = val; | ||
207 | ++map; | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static struct megamod_pic * __init init_megamod_pic(struct device_node *np) | ||
213 | { | ||
214 | struct megamod_pic *pic; | ||
215 | int i, irq; | ||
216 | int mapping[NR_MUX_OUTPUTS]; | ||
217 | |||
218 | pr_info("Initializing C64x+ Megamodule PIC\n"); | ||
219 | |||
220 | pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL); | ||
221 | if (!pic) { | ||
222 | pr_err("%s: Could not alloc PIC structure.\n", np->full_name); | ||
223 | return NULL; | ||
224 | } | ||
225 | |||
226 | pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, | ||
227 | NR_COMBINERS * 32, &megamod_host_ops, | ||
228 | IRQ_UNMAPPED); | ||
229 | if (!pic->irqhost) { | ||
230 | pr_err("%s: Could not alloc host.\n", np->full_name); | ||
231 | goto error_free; | ||
232 | } | ||
233 | |||
234 | pic->irqhost->host_data = pic; | ||
235 | |||
236 | raw_spin_lock_init(&pic->lock); | ||
237 | |||
238 | pic->regs = of_iomap(np, 0); | ||
239 | if (!pic->regs) { | ||
240 | pr_err("%s: Could not map registers.\n", np->full_name); | ||
241 | goto error_free; | ||
242 | } | ||
243 | |||
244 | /* Initialize MUX map */ | ||
245 | for (i = 0; i < ARRAY_SIZE(mapping); i++) | ||
246 | mapping[i] = IRQ_UNMAPPED; | ||
247 | |||
248 | parse_priority_map(pic, mapping, ARRAY_SIZE(mapping)); | ||
249 | |||
250 | /* | ||
251 | * We can have up to 12 interrupts cascading to the core controller. | ||
252 | * These cascades can be from the combined interrupt sources or for | ||
253 | * individual interrupt sources. The "interrupts" property only | ||
254 | * deals with the cascaded combined interrupts. The individual | ||
255 | * interrupts muxed to the core controller use the core controller | ||
256 | * as their interrupt parent. | ||
257 | */ | ||
258 | for (i = 0; i < NR_COMBINERS; i++) { | ||
259 | |||
260 | irq = irq_of_parse_and_map(np, i); | ||
261 | if (irq == NO_IRQ) | ||
262 | continue; | ||
263 | |||
264 | /* | ||
265 | * We count on the core priority interrupts (4 - 15) being | ||
266 | * direct mapped. Check that device tree provided something | ||
267 | * in that range. | ||
268 | */ | ||
269 | if (irq < 4 || irq >= NR_PRIORITY_IRQS) { | ||
270 | pr_err("%s: combiner-%d virq %d out of range!\n", | ||
271 | np->full_name, i, irq); | ||
272 | continue; | ||
273 | } | ||
274 | |||
275 | /* record the mapping */ | ||
276 | mapping[irq - 4] = i; | ||
277 | |||
278 | pr_debug("%s: combiner-%d cascading to virq %d\n", | ||
279 | np->full_name, i, irq); | ||
280 | |||
281 | cascade_data[i].pic = pic; | ||
282 | cascade_data[i].index = i; | ||
283 | |||
284 | /* mask and clear all events in combiner */ | ||
285 | soc_writel(~0, &pic->regs->evtmask[i]); | ||
286 | soc_writel(~0, &pic->regs->evtclr[i]); | ||
287 | |||
288 | irq_set_handler_data(irq, &cascade_data[i]); | ||
289 | irq_set_chained_handler(irq, megamod_irq_cascade); | ||
290 | } | ||
291 | |||
292 | /* Finally, set up the MUX registers */ | ||
293 | for (i = 0; i < NR_MUX_OUTPUTS; i++) { | ||
294 | if (mapping[i] != IRQ_UNMAPPED) { | ||
295 | pr_debug("%s: setting mux %d to priority %d\n", | ||
296 | np->full_name, mapping[i], i + 4); | ||
297 | set_megamod_mux(pic, mapping[i], i); | ||
298 | } | ||
299 | } | ||
300 | |||
301 | return pic; | ||
302 | |||
303 | error_free: | ||
304 | kfree(pic); | ||
305 | |||
306 | return NULL; | ||
307 | } | ||
308 | |||
309 | /* | ||
310 | * Return next active event after ACK'ing it. | ||
311 | * Return -1 if no events active. | ||
312 | */ | ||
313 | static int get_exception(void) | ||
314 | { | ||
315 | int i, bit; | ||
316 | u32 mask; | ||
317 | |||
318 | for (i = 0; i < NR_COMBINERS; i++) { | ||
319 | mask = soc_readl(&mm_pic->regs->mexpflag[i]); | ||
320 | if (mask) { | ||
321 | bit = __ffs(mask); | ||
322 | soc_writel(1 << bit, &mm_pic->regs->evtclr[i]); | ||
323 | return (i * 32) + bit; | ||
324 | } | ||
325 | } | ||
326 | return -1; | ||
327 | } | ||
328 | |||
329 | static void assert_event(unsigned int val) | ||
330 | { | ||
331 | soc_writel(val, &mm_pic->regs->evtasrt); | ||
332 | } | ||
333 | |||
334 | void __init megamod_pic_init(void) | ||
335 | { | ||
336 | struct device_node *np; | ||
337 | |||
338 | np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic"); | ||
339 | if (!np) | ||
340 | return; | ||
341 | |||
342 | mm_pic = init_megamod_pic(np); | ||
343 | of_node_put(np); | ||
344 | |||
345 | soc_ops.get_exception = get_exception; | ||
346 | soc_ops.assert_event = assert_event; | ||
347 | |||
348 | return; | ||
349 | } | ||
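As a worked example of the INTMUX packing that set_megamod_mux() implements: each 32-bit mux register carries four 8-bit event-source fields, so core-priority output 9 (priority interrupt 13) is programmed in intmux[9 / 4] = intmux[2] at bit offset (9 & 3) * 8 = 8. A hypothetical helper showing just that arithmetic, not part of the patch:

/* Locate the mux field for core-priority output "out" (0..11). */
static inline void megamod_mux_field(int out, int *index, int *shift)
{
	*index = out / 4;	/* four 8-bit source fields per register */
	*shift = (out & 3) * 8;	/* byte lane within that register */
}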