author     James Hogan <james.hogan@imgtec.com>  2012-10-09 05:54:47 -0400
committer  James Hogan <james.hogan@imgtec.com>  2013-03-02 15:09:48 -0500
commit     63047ea36070d11f902ab7d09a5a18aea037c0f7 (patch)
tree       f82e359be810c8b747ed390fc942e8a3d91226f3 /arch
parent     ac919f0883e53d7785745566692c8a0620abd7ea (diff)
metag: IRQ handling
Add core IRQ handling for metag. The code in irq.c exposes the TBX signal
numbers as Linux IRQs.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
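For orientation, here is a minimal sketch (not part of the patch) of how a platform driver could consume this interface: it maps a TBI signal number to a Linux virtual IRQ with tbisig_map() and then registers a handler through the standard request_irq() API. The signal number, device name and handler below are hypothetical.

    #include <linux/interrupt.h>
    #include <linux/errno.h>
    #include <asm/irq.h>

    #define MYDEV_TBI_SIGNUM 3	/* hypothetical TBI signal number */

    static irqreturn_t mydev_isr(int irq, void *dev_id)
    {
    	/* acknowledge and handle the device interrupt here */
    	return IRQ_HANDLED;
    }

    static int mydev_setup_irq(void *dev)
    {
    	/* Translate the TBI signal number into a Linux virtual IRQ. */
    	int irq = tbisig_map(MYDEV_TBI_SIGNUM);

    	if (!irq)	/* irq_create_mapping() returns 0 on failure */
    		return -EINVAL;

    	/* From here on the normal genirq API applies. */
    	return request_irq(irq, mydev_isr, 0, "mydev", dev);
    }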
Diffstat (limited to 'arch')
-rw-r--r--  arch/metag/include/asm/irq.h       |  32
-rw-r--r--  arch/metag/include/asm/irqflags.h  |  94
-rw-r--r--  arch/metag/kernel/irq.c            | 318
3 files changed, 444 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/irq.h b/arch/metag/include/asm/irq.h
new file mode 100644
index 000000000000..be0c8f3c5a5d
--- /dev/null
+++ b/arch/metag/include/asm/irq.h
@@ -0,0 +1,32 @@
1#ifndef __ASM_METAG_IRQ_H
2#define __ASM_METAG_IRQ_H
3
4#ifdef CONFIG_4KSTACKS
5extern void irq_ctx_init(int cpu);
6extern void irq_ctx_exit(int cpu);
7# define __ARCH_HAS_DO_SOFTIRQ
8#else
9# define irq_ctx_init(cpu) do { } while (0)
10# define irq_ctx_exit(cpu) do { } while (0)
11#endif
12
13void tbi_startup_interrupt(int);
14void tbi_shutdown_interrupt(int);
15
16struct pt_regs;
17
18int tbisig_map(unsigned int hw);
19extern void do_IRQ(int irq, struct pt_regs *regs);
20
21#ifdef CONFIG_METAG_SUSPEND_MEM
22int traps_save_context(void);
23int traps_restore_context(void);
24#endif
25
26#include <asm-generic/irq.h>
27
28#ifdef CONFIG_HOTPLUG_CPU
29extern void migrate_irqs(void);
30#endif
31
32#endif /* __ASM_METAG_IRQ_H */
diff --git a/arch/metag/include/asm/irqflags.h b/arch/metag/include/asm/irqflags.h
new file mode 100644
index 000000000000..cba5e135bc9a
--- /dev/null
+++ b/arch/metag/include/asm/irqflags.h
@@ -0,0 +1,94 @@
1/*
2 * IRQ flags handling
3 *
4 * This file gets included from lowlevel asm headers too, to provide
5 * wrapped versions of the local_irq_*() APIs, based on the
6 * raw_local_irq_*() functions from the lowlevel headers.
7 */
8#ifndef _ASM_IRQFLAGS_H
9#define _ASM_IRQFLAGS_H
10
11#ifndef __ASSEMBLY__
12
13#include <asm/core_reg.h>
14#include <asm/metag_regs.h>
15
16#define INTS_OFF_MASK TXSTATI_BGNDHALT_BIT
17
18#ifdef CONFIG_SMP
19extern unsigned int get_trigger_mask(void);
20#else
21
22extern unsigned int global_trigger_mask;
23
24static inline unsigned int get_trigger_mask(void)
25{
26 return global_trigger_mask;
27}
28#endif
29
30static inline unsigned long arch_local_save_flags(void)
31{
32 return __core_reg_get(TXMASKI);
33}
34
35static inline int arch_irqs_disabled_flags(unsigned long flags)
36{
37 return (flags & ~INTS_OFF_MASK) == 0;
38}
39
40static inline int arch_irqs_disabled(void)
41{
42 unsigned long flags = arch_local_save_flags();
43
44 return arch_irqs_disabled_flags(flags);
45}
46
47static inline unsigned long __irqs_disabled(void)
48{
49 /*
50 * We shouldn't enable exceptions if they are not already
51 * enabled. This is required for chancalls to work correctly.
52 */
53 return arch_local_save_flags() & INTS_OFF_MASK;
54}
55
56/*
57 * For spinlocks, etc:
58 */
59static inline unsigned long arch_local_irq_save(void)
60{
61 unsigned long flags = __irqs_disabled();
62
63 asm volatile("SWAP %0,TXMASKI\n" : "=r" (flags) : "0" (flags)
64 : "memory");
65
66 return flags;
67}
68
69static inline void arch_local_irq_restore(unsigned long flags)
70{
71 asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
72}
73
74static inline void arch_local_irq_disable(void)
75{
76 unsigned long flags = __irqs_disabled();
77
78 asm volatile("MOV TXMASKI,%0\n" : : "r" (flags) : "memory");
79}
80
81static inline void arch_local_irq_enable(void)
82{
83#ifdef CONFIG_SMP
84 preempt_disable();
85 arch_local_irq_restore(get_trigger_mask());
86 preempt_enable_no_resched();
87#else
88 arch_local_irq_restore(get_trigger_mask());
89#endif
90}
91
92#endif /* (__ASSEMBLY__) */
93
94#endif /* !(_ASM_IRQFLAGS_H) */
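As the header comment at the top of irqflags.h notes, drivers do not call these arch_local_*() helpers directly; the generic wrappers in <linux/irqflags.h> sit on top of them. A minimal sketch of a typical caller (not part of the patch) — on metag the saved flags value is simply the previous TXMASKI trigger mask:

    	unsigned long flags;

    	/*
    	 * local_irq_save() expands (via the raw_local_irq_* layer) to
    	 * arch_local_irq_save() above, which SWAPs the "interrupts off"
    	 * value into TXMASKI and hands back the previous mask.
    	 */
    	local_irq_save(flags);

    	/* ... work that must not be interrupted ... */

    	/* Writes the saved trigger mask straight back to TXMASKI. */
    	local_irq_restore(flags);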
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
new file mode 100644
index 000000000000..7c043491e1e3
--- /dev/null
+++ b/arch/metag/kernel/irq.c
@@ -0,0 +1,318 @@
1/*
2 * Linux/Meta general interrupt handling code
3 *
4 */
5
6#include <linux/kernel.h>
7#include <linux/interrupt.h>
8#include <linux/init.h>
9#include <linux/irqdomain.h>
10#include <linux/ratelimit.h>
11
12#include <asm/core_reg.h>
13#include <asm/mach/arch.h>
14#include <asm/uaccess.h>
15
16#ifdef CONFIG_4KSTACKS
17union irq_ctx {
18 struct thread_info tinfo;
19 u32 stack[THREAD_SIZE/sizeof(u32)];
20};
21
22static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
23static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
24#endif
25
26struct irq_domain *root_domain;
27
28static unsigned int startup_meta_irq(struct irq_data *data)
29{
30 tbi_startup_interrupt(data->hwirq);
31 return 0;
32}
33
34static void shutdown_meta_irq(struct irq_data *data)
35{
36 tbi_shutdown_interrupt(data->hwirq);
37}
38
39void do_IRQ(int irq, struct pt_regs *regs)
40{
41 struct pt_regs *old_regs = set_irq_regs(regs);
42#ifdef CONFIG_4KSTACKS
43 struct irq_desc *desc;
44 union irq_ctx *curctx, *irqctx;
45 u32 *isp;
46#endif
47
48 irq_enter();
49
50 irq = irq_linear_revmap(root_domain, irq);
51
52#ifdef CONFIG_DEBUG_STACKOVERFLOW
53 /* Debugging check for stack overflow: is there less than 1KB free? */
54 {
55 unsigned long sp;
56
57 sp = __core_reg_get(A0StP);
58 sp &= THREAD_SIZE - 1;
59
60 if (unlikely(sp > (THREAD_SIZE - 1024)))
61 pr_err("Stack overflow in do_IRQ: %ld\n", sp);
62 }
63#endif
64
65
66#ifdef CONFIG_4KSTACKS
67 curctx = (union irq_ctx *) current_thread_info();
68 irqctx = hardirq_ctx[smp_processor_id()];
69
70 /*
71 * this is where we switch to the IRQ stack. However, if we are
72 * already using the IRQ stack (because we interrupted a hardirq
73 * handler) we can't do that and just have to keep using the
74 * current stack (which is the irq stack already after all)
75 */
76 if (curctx != irqctx) {
77 /* build the stack frame on the IRQ stack */
78 isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
79 irqctx->tinfo.task = curctx->tinfo.task;
80
81 /*
82 * Copy the softirq bits in preempt_count so that the
83 * softirq checks work in the hardirq context.
84 */
85 irqctx->tinfo.preempt_count =
86 (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
87 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
88
89 desc = irq_to_desc(irq);
90
91 asm volatile (
92 "MOV D0.5,%0\n"
93 "MOV D1Ar1,%1\n"
94 "MOV D1RtP,%2\n"
95 "MOV D0Ar2,%3\n"
96 "SWAP A0StP,D0.5\n"
97 "SWAP PC,D1RtP\n"
98 "MOV A0StP,D0.5\n"
99 :
100 : "r" (isp), "r" (irq), "r" (desc->handle_irq),
101 "r" (desc)
102 : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
103 "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
104 "D0.5"
105 );
106 } else
107#endif
108 generic_handle_irq(irq);
109
110 irq_exit();
111
112 set_irq_regs(old_regs);
113}
114
115#ifdef CONFIG_4KSTACKS
116
117static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
118
119static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
120
121/*
122 * allocate per-cpu stacks for hardirq and for softirq processing
123 */
124void irq_ctx_init(int cpu)
125{
126 union irq_ctx *irqctx;
127
128 if (hardirq_ctx[cpu])
129 return;
130
131 irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
132 irqctx->tinfo.task = NULL;
133 irqctx->tinfo.exec_domain = NULL;
134 irqctx->tinfo.cpu = cpu;
135 irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
136 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
137
138 hardirq_ctx[cpu] = irqctx;
139
140 irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
141 irqctx->tinfo.task = NULL;
142 irqctx->tinfo.exec_domain = NULL;
143 irqctx->tinfo.cpu = cpu;
144 irqctx->tinfo.preempt_count = 0;
145 irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
146
147 softirq_ctx[cpu] = irqctx;
148
149 pr_info("CPU %u irqstacks, hard=%p soft=%p\n",
150 cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
151}
152
153void irq_ctx_exit(int cpu)
154{
155 hardirq_ctx[smp_processor_id()] = NULL;
156}
157
158extern asmlinkage void __do_softirq(void);
159
160asmlinkage void do_softirq(void)
161{
162 unsigned long flags;
163 struct thread_info *curctx;
164 union irq_ctx *irqctx;
165 u32 *isp;
166
167 if (in_interrupt())
168 return;
169
170 local_irq_save(flags);
171
172 if (local_softirq_pending()) {
173 curctx = current_thread_info();
174 irqctx = softirq_ctx[smp_processor_id()];
175 irqctx->tinfo.task = curctx->task;
176
177 /* build the stack frame on the softirq stack */
178 isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
179
180 asm volatile (
181 "MOV D0.5,%0\n"
182 "SWAP A0StP,D0.5\n"
183 "CALLR D1RtP,___do_softirq\n"
184 "MOV A0StP,D0.5\n"
185 :
186 : "r" (isp)
187 : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
188 "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
189 "D0.5"
190 );
191 /*
192 * Shouldn't happen, we returned above if in_interrupt():
193 */
194 WARN_ON_ONCE(softirq_count());
195 }
196
197 local_irq_restore(flags);
198}
199#endif
200
201static struct irq_chip meta_irq_type = {
202 .name = "META-IRQ",
203 .irq_startup = startup_meta_irq,
204 .irq_shutdown = shutdown_meta_irq,
205};
206
207/**
208 * tbisig_map() - Map a TBI signal number to a virtual IRQ number.
209 * @hw: Number of the TBI signal. Must be in range.
210 *
211 * Returns: The virtual IRQ number of the TBI signal number IRQ specified by
212 * @hw.
213 */
214int tbisig_map(unsigned int hw)
215{
216 return irq_create_mapping(root_domain, hw);
217}
218
219/**
220 * metag_tbisig_map() - map a tbi signal to a Linux virtual IRQ number
221 * @d: root irq domain
222 * @irq: virtual irq number
223 * @hw: hardware irq number (TBI signal number)
224 *
225 * This sets up a virtual irq for a specified TBI signal number.
226 */
227static int metag_tbisig_map(struct irq_domain *d, unsigned int irq,
228 irq_hw_number_t hw)
229{
230#ifdef CONFIG_SMP
231 irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq);
232#else
233 irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq);
234#endif
235 return 0;
236}
237
238static const struct irq_domain_ops metag_tbisig_domain_ops = {
239 .map = metag_tbisig_map,
240};
241
242/*
243 * void init_IRQ(void)
244 *
245 * Parameters: None
246 *
247 * Returns: Nothing
248 *
249 * This function should be called during kernel startup to initialize
250 * the IRQ handling routines.
251 */
252void __init init_IRQ(void)
253{
254 root_domain = irq_domain_add_linear(NULL, 32,
255 &metag_tbisig_domain_ops, NULL);
256 if (unlikely(!root_domain))
257 panic("init_IRQ: cannot add root IRQ domain");
258
259 irq_ctx_init(smp_processor_id());
260
261 if (machine_desc->init_irq)
262 machine_desc->init_irq();
263}
264
265int __init arch_probe_nr_irqs(void)
266{
267 if (machine_desc->nr_irqs)
268 nr_irqs = machine_desc->nr_irqs;
269 return 0;
270}
271
272#ifdef CONFIG_HOTPLUG_CPU
273static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
274{
275 struct irq_desc *desc = irq_to_desc(irq);
276 struct irq_chip *chip = irq_data_get_irq_chip(data);
277
278 raw_spin_lock_irq(&desc->lock);
279 if (chip->irq_set_affinity)
280 chip->irq_set_affinity(data, cpumask_of(cpu), false);
281 raw_spin_unlock_irq(&desc->lock);
282}
283
284/*
285 * The CPU has been marked offline. Migrate IRQs off this CPU. If
286 * the affinity settings do not allow other CPUs, force them onto any
287 * available CPU.
288 */
289void migrate_irqs(void)
290{
291 unsigned int i, cpu = smp_processor_id();
292 struct irq_desc *desc;
293
294 for_each_irq_desc(i, desc) {
295 struct irq_data *data = irq_desc_get_irq_data(desc);
296 unsigned int newcpu;
297
298 if (irqd_is_per_cpu(data))
299 continue;
300
301 if (!cpumask_test_cpu(cpu, data->affinity))
302 continue;
303
304 newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
305
306 if (newcpu >= nr_cpu_ids) {
307 pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
308 i, cpu);
309
310 cpumask_setall(data->affinity);
311 newcpu = cpumask_any_and(data->affinity,
312 cpu_online_mask);
313 }
314
315 route_irq(data, i, newcpu);
316 }
317}
318#endif /* CONFIG_HOTPLUG_CPU */