path: root/arch/sh/kernel/cpu/sh4
author    Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sh/kernel/cpu/sh4
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sh/kernel/cpu/sh4')
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile      10
-rw-r--r--  arch/sh/kernel/cpu/sh4/ex.S         384
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c        335
-rw-r--r--  arch/sh/kernel/cpu/sh4/irq_intc2.c  222
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c      138
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c         453
6 files changed, 1542 insertions(+), 0 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
new file mode 100644
index 000000000000..ead1071eac73
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Linux/SuperH SH-4 backends.
3#
4
5obj-y := ex.o probe.o
6
7obj-$(CONFIG_SH_FPU) += fpu.o
8obj-$(CONFIG_CPU_SUBTYPE_ST40STB1) += irq_intc2.o
9obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
10
diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S
new file mode 100644
index 000000000000..8221e9d15515
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/ex.S
@@ -0,0 +1,384 @@
1/*
2 * arch/sh/kernel/cpu/sh4/ex.S
3 *
4 * The SH-4 exception vector table.
5 *
6 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
7 * Copyright (C) 2003 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 *
13 */
14#include <linux/linkage.h>
15#include <linux/config.h>
16
17 .align 2
18 .data
19
20ENTRY(exception_handling_table)
21 .long exception_error /* 000 */
22 .long exception_error
23#if defined(CONFIG_MMU)
24 .long tlb_miss_load /* 040 */
25 .long tlb_miss_store
26 .long initial_page_write
27 .long tlb_protection_violation_load
28 .long tlb_protection_violation_store
29 .long address_error_load
30 .long address_error_store /* 100 */
31#else
32 .long exception_error ! tlb miss load /* 040 */
33 .long exception_error ! tlb miss store
34 .long exception_error ! initial page write
35 .long exception_error ! tlb prot violation load
36 .long exception_error ! tlb prot violation store
37 .long exception_error ! address error load
38 .long exception_error ! address error store /* 100 */
39#endif
40#if defined(CONFIG_SH_FPU)
41 .long do_fpu_error /* 120 */
42#else
43 .long exception_error /* 120 */
44#endif
45 .long exception_error /* 140 */
46 .long system_call ! Unconditional Trap /* 160 */
47 .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
48 .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
49ENTRY(nmi_slot)
50#if defined (CONFIG_KGDB_NMI)
51 .long debug_enter /* 1C0 */ ! Allow trap to debugger
52#else
53 .long exception_none /* 1C0 */ ! Not implemented yet
54#endif
55ENTRY(user_break_point_trap)
56 .long break_point_trap /* 1E0 */
57ENTRY(interrupt_table)
58 ! external hardware
59 .long do_IRQ ! 0000 /* 200 */
60 .long do_IRQ ! 0001
61 .long do_IRQ ! 0010
62 .long do_IRQ ! 0011
63 .long do_IRQ ! 0100
64 .long do_IRQ ! 0101
65 .long do_IRQ ! 0110
66 .long do_IRQ ! 0111
67 .long do_IRQ ! 1000 /* 300 */
68 .long do_IRQ ! 1001
69 .long do_IRQ ! 1010
70 .long do_IRQ ! 1011
71 .long do_IRQ ! 1100
72 .long do_IRQ ! 1101
73 .long do_IRQ ! 1110
74 .long exception_error
75 ! Internal hardware
76 .long do_IRQ ! TMU0 tuni0 /* 400 */
77 .long do_IRQ ! TMU1 tuni1
78 .long do_IRQ ! TMU2 tuni2
79 .long do_IRQ ! ticpi2
80#if defined(CONFIG_CPU_SUBTYPE_SH7760)
81 .long exception_error
82 .long exception_error
83 .long exception_error
84 .long exception_error
85 .long exception_error /* 500 */
86 .long exception_error
87 .long exception_error
88#else
89 .long do_IRQ ! RTC ati
90 .long do_IRQ ! pri
91 .long do_IRQ ! cui
92 .long do_IRQ ! SCI eri
93 .long do_IRQ ! rxi /* 500 */
94 .long do_IRQ ! txi
95 .long do_IRQ ! tei
96#endif
97 .long do_IRQ ! WDT iti /* 560 */
98 .long do_IRQ ! REF rcmi
99 .long do_IRQ ! rovi
100 .long do_IRQ
101 .long do_IRQ /* 5E0 */
102 .long do_IRQ ! 32 Hitachi UDI /* 600 */
103 .long do_IRQ ! 33 GPIO
104 .long do_IRQ ! 34 DMAC dmte0
105 .long do_IRQ ! 35 dmte1
106 .long do_IRQ ! 36 dmte2
107 .long do_IRQ ! 37 dmte3
108 .long do_IRQ ! 38 dmae
109 .long exception_error ! 39 /* 6E0 */
110#if defined(CONFIG_CPU_SUBTYPE_SH7760)
111 .long exception_error /* 700 */
112 .long exception_error
113 .long exception_error
114 .long exception_error /* 760 */
115#else
116 .long do_IRQ ! 40 SCIF eri /* 700 */
117 .long do_IRQ ! 41 rxi
118 .long do_IRQ ! 42 bri
119 .long do_IRQ ! 43 txi
120#endif
121#if CONFIG_NR_ONCHIP_DMA_CHANNELS == 8
122 .long do_IRQ ! 44 DMAC dmte4 /* 780 */
123 .long do_IRQ ! 45 dmte5
124 .long do_IRQ ! 46 dmte6
125 .long do_IRQ ! 47 dmte7 /* 7E0 */
126#else
127 .long exception_error ! 44 /* 780 */
128 .long exception_error ! 45
129 .long exception_error ! 46
130 .long exception_error ! 47
131#endif
132#if defined(CONFIG_SH_FPU)
133 .long do_fpu_state_restore ! 48 /* 800 */
134 .long do_fpu_state_restore ! 49 /* 820 */
135#else
136 .long exception_error
137 .long exception_error
138#endif
139#if defined(CONFIG_CPU_SUBTYPE_SH7751)
140 .long exception_error /* 840 */
141 .long exception_error
142 .long exception_error
143 .long exception_error
144 .long exception_error
145 .long exception_error
146 .long exception_error /* 900 */
147 .long exception_error
148 .long exception_error
149 .long exception_error
150 .long exception_error
151 .long exception_error
152 .long exception_error
153 .long exception_error
154 .long do_IRQ ! PCI serr /* A00 */
155 .long do_IRQ ! dma3
156 .long do_IRQ ! dma2
157 .long do_IRQ ! dma1
158 .long do_IRQ ! dma0
159 .long do_IRQ ! pwon
160 .long do_IRQ ! pwdwn
161 .long do_IRQ ! err
162 .long do_IRQ ! TMU3 tuni3 /* B00 */
163 .long exception_error
164 .long exception_error
165 .long exception_error
166 .long do_IRQ ! TMU4 tuni4 /* B80 */
167#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
168 .long do_IRQ ! IRQ irq6 /* 840 */
169 .long do_IRQ ! irq7
170 .long do_IRQ ! SCIF eri0
171 .long do_IRQ ! rxi0
172 .long do_IRQ ! bri0
173 .long do_IRQ ! txi0
174 .long do_IRQ ! HCAN2 cani0 /* 900 */
175 .long do_IRQ ! cani1
176 .long do_IRQ ! SSI ssii0
177 .long do_IRQ ! ssii1
178 .long do_IRQ ! HAC haci0
179 .long do_IRQ ! haci1
180 .long do_IRQ ! IIC iici0
181 .long do_IRQ ! iici1
182 .long do_IRQ ! USB usbi /* A00 */
183 .long do_IRQ ! LCDC vint
184 .long exception_error
185 .long exception_error
186 .long do_IRQ ! DMABRG dmabrgi0
187 .long do_IRQ ! dmabrgi1
188 .long do_IRQ ! dmabrgi2
189 .long exception_error
190 .long do_IRQ ! SCIF eri1 /* B00 */
191 .long do_IRQ ! rxi1
192 .long do_IRQ ! bri1
193 .long do_IRQ ! txi1
194 .long do_IRQ ! eri2
195 .long do_IRQ ! rxi2
196 .long do_IRQ ! bri2
197 .long do_IRQ ! txi2
198 .long do_IRQ ! SIM simeri /* C00 */
199 .long do_IRQ ! simrxi
200 .long do_IRQ ! simtxi
201 .long do_IRQ ! simtei
202 .long do_IRQ ! HSPI spii
203 .long exception_error
204 .long exception_error
205 .long exception_error
206 .long do_IRQ ! MMCIF mmci0 /* D00 */
207 .long do_IRQ ! mmci1
208 .long do_IRQ ! mmci2
209 .long do_IRQ ! mmci3
210 .long exception_error
211 .long exception_error
212 .long exception_error
213 .long exception_error
214 .long exception_error /* E00 */
215 .long exception_error
216 .long exception_error
217 .long exception_error
218 .long do_IRQ ! MFI mfii
219 .long exception_error
220 .long exception_error
221 .long exception_error
222 .long exception_error /* F00 */
223 .long exception_error
224 .long exception_error
225 .long exception_error
226 .long do_IRQ ! ADC adi
227 .long do_IRQ ! CMT cmti /* FA0 */
228#elif defined(CONFIG_CPU_SUBTYPE_SH73180)
229 .long do_IRQ ! 50 0x840
230 .long do_IRQ ! 51 0x860
231 .long do_IRQ ! 52 0x880
232 .long do_IRQ ! 53 0x8a0
233 .long do_IRQ ! 54 0x8c0
234 .long do_IRQ ! 55 0x8e0
235 .long do_IRQ ! 56 0x900
236 .long do_IRQ ! 57 0x920
237 .long do_IRQ ! 58 0x940
238 .long do_IRQ ! 59 0x960
239 .long do_IRQ ! 60 0x980
240 .long do_IRQ ! 61 0x9a0
241 .long do_IRQ ! 62 0x9c0
242 .long do_IRQ ! 63 0x9e0
243 .long do_IRQ ! 64 0xa00
244 .long do_IRQ ! 65 0xa20
245 .long do_IRQ ! 66 0xa40
246 .long do_IRQ ! 67 0xa60
247 .long do_IRQ ! 68 0xa80
248 .long do_IRQ ! 69 0xaa0
249 .long do_IRQ ! 70 0xac0
250 .long do_IRQ ! 71 0xae0
251 .long do_IRQ ! 72 0xb00
252 .long do_IRQ ! 73 0xb20
253 .long do_IRQ ! 74 0xb40
254 .long do_IRQ ! 75 0xb60
255 .long do_IRQ ! 76 0xb80
256 .long do_IRQ ! 77 0xba0
257 .long do_IRQ ! 78 0xbc0
258 .long do_IRQ ! 79 0xbe0
259 .long do_IRQ ! 80 0xc00
260 .long do_IRQ ! 81 0xc20
261 .long do_IRQ ! 82 0xc40
262 .long do_IRQ ! 83 0xc60
263 .long do_IRQ ! 84 0xc80
264 .long do_IRQ ! 85 0xca0
265 .long do_IRQ ! 86 0xcc0
266 .long do_IRQ ! 87 0xce0
267 .long do_IRQ ! 88 0xd00
268 .long do_IRQ ! 89 0xd20
269 .long do_IRQ ! 90 0xd40
270 .long do_IRQ ! 91 0xd60
271 .long do_IRQ ! 92 0xd80
272 .long do_IRQ ! 93 0xda0
273 .long do_IRQ ! 94 0xdc0
274 .long do_IRQ ! 95 0xde0
275 .long do_IRQ ! 96 0xe00
276 .long do_IRQ ! 97 0xe20
277 .long do_IRQ ! 98 0xe40
278 .long do_IRQ ! 99 0xe60
279 .long do_IRQ ! 100 0xe80
280 .long do_IRQ ! 101 0xea0
281 .long do_IRQ ! 102 0xec0
282 .long do_IRQ ! 103 0xee0
283 .long do_IRQ ! 104 0xf00
284 .long do_IRQ ! 105 0xf20
285 .long do_IRQ ! 106 0xf40
286 .long do_IRQ ! 107 0xf60
287 .long do_IRQ ! 108 0xf80
288#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1)
289 .long exception_error ! 50 0x840
290 .long exception_error ! 51 0x860
291 .long exception_error ! 52 0x880
292 .long exception_error ! 53 0x8a0
293 .long exception_error ! 54 0x8c0
294 .long exception_error ! 55 0x8e0
295 .long exception_error ! 56 0x900
296 .long exception_error ! 57 0x920
297 .long exception_error ! 58 0x940
298 .long exception_error ! 59 0x960
299 .long exception_error ! 60 0x980
300 .long exception_error ! 61 0x9a0
301 .long exception_error ! 62 0x9c0
302 .long exception_error ! 63 0x9e0
303 .long do_IRQ ! 64 0xa00 PCI serr
304 .long do_IRQ ! 65 0xa20 err
305 .long do_IRQ ! 66 0xa40 ad
306 .long do_IRQ ! 67 0xa60 pwr_dwn
307 .long exception_error ! 68 0xa80
308 .long exception_error ! 69 0xaa0
309 .long exception_error ! 70 0xac0
310 .long exception_error ! 71 0xae0
311 .long do_IRQ ! 72 0xb00 DMA INT0
312 .long do_IRQ ! 73 0xb20 INT1
313 .long do_IRQ ! 74 0xb40 INT2
314 .long do_IRQ ! 75 0xb60 INT3
315 .long do_IRQ ! 76 0xb80 INT4
316 .long exception_error ! 77 0xba0
317 .long do_IRQ ! 78 0xbc0 DMA ERR
318 .long exception_error ! 79 0xbe0
319 .long do_IRQ ! 80 0xc00 PIO0
320 .long do_IRQ ! 81 0xc20 PIO1
321 .long do_IRQ ! 82 0xc40 PIO2
322 .long exception_error ! 83 0xc60
323 .long exception_error ! 84 0xc80
324 .long exception_error ! 85 0xca0
325 .long exception_error ! 86 0xcc0
326 .long exception_error ! 87 0xce0
327 .long exception_error ! 88 0xd00
328 .long exception_error ! 89 0xd20
329 .long exception_error ! 90 0xd40
330 .long exception_error ! 91 0xd60
331 .long exception_error ! 92 0xd80
332 .long exception_error ! 93 0xda0
333 .long exception_error ! 94 0xdc0
334 .long exception_error ! 95 0xde0
335 .long exception_error ! 96 0xe00
336 .long exception_error ! 97 0xe20
337 .long exception_error ! 98 0xe40
338 .long exception_error ! 99 0xe60
339 .long exception_error ! 100 0xe80
340 .long exception_error ! 101 0xea0
341 .long exception_error ! 102 0xec0
342 .long exception_error ! 103 0xee0
343 .long exception_error ! 104 0xf00
344 .long exception_error ! 105 0xf20
345 .long exception_error ! 106 0xf40
346 .long exception_error ! 107 0xf60
347 .long exception_error ! 108 0xf80
348 .long exception_error ! 109 0xfa0
349 .long exception_error ! 110 0xfc0
350 .long exception_error ! 111 0xfe0
351 .long do_IRQ ! 112 0x1000 Mailbox
352 .long exception_error ! 113 0x1020
353 .long exception_error ! 114 0x1040
354 .long exception_error ! 115 0x1060
355 .long exception_error ! 116 0x1080
356 .long exception_error ! 117 0x10a0
357 .long exception_error ! 118 0x10c0
358 .long exception_error ! 119 0x10e0
359 .long exception_error ! 120 0x1100
360 .long exception_error ! 121 0x1120
361 .long exception_error ! 122 0x1140
362 .long exception_error ! 123 0x1160
363 .long exception_error ! 124 0x1180
364 .long exception_error ! 125 0x11a0
365 .long exception_error ! 126 0x11c0
366 .long exception_error ! 127 0x11e0
367 .long exception_error ! 128 0x1200
368 .long exception_error ! 129 0x1220
369 .long exception_error ! 130 0x1240
370 .long exception_error ! 131 0x1260
371 .long exception_error ! 132 0x1280
372 .long exception_error ! 133 0x12a0
373 .long exception_error ! 134 0x12c0
374 .long exception_error ! 135 0x12e0
375 .long exception_error ! 136 0x1300
376 .long exception_error ! 137 0x1320
377 .long exception_error ! 138 0x1340
378 .long exception_error ! 139 0x1360
379 .long do_IRQ ! 140 0x1380 EMPI INV_ADDR
380 .long exception_error ! 141 0x13a0
381 .long exception_error ! 142 0x13c0
382 .long exception_error ! 143 0x13e0
383#endif
384
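A note on how this table is consumed: each .long slot corresponds to one INTEVT event code, stepping by 0x20 per entry, which is why the offset comments advance from /* 200 */ to /* 300 */ every eight entries. Below is a small illustrative sketch of that index arithmetic; the actual lookup lives in the low-level entry code, which is not part of this diff, so treat the helper name as hypothetical.

/* Illustrative only: map an SH-4 INTEVT event code to its slot in
 * interrupt_table[] above. Codes start at 0x200 and step by 0x20,
 * one per .long entry (so 0x200 -> 0, 0x220 -> 1, 0x300 -> 8). */
static inline unsigned int intevt_to_index(unsigned int intevt)
{
	return (intevt - 0x200) >> 5;
}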
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
new file mode 100644
index 000000000000..f486c07e10e2
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -0,0 +1,335 @@
1/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $
2 *
3 * arch/sh/kernel/cpu/sh4/fpu.c
4 *
5 * Save/restore floating point context for signal handlers.
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
12 *
13 * FIXME! These routines can be optimized in big endian case.
14 */
15
16#include <linux/sched.h>
17#include <linux/signal.h>
18#include <asm/processor.h>
19#include <asm/io.h>
20
21/* The PR (precision) bit in the FP Status Register must be clear when
22 * an frchg instruction is executed, otherwise the instruction is undefined.
23 * Executing frchg with PR set causes a trap on some SH4 implementations.
24 */
25
26#define FPSCR_RCHG 0x00000000
27
28
29/*
30 * Save FPU registers onto task structure.
31 * Assume called with FPU enabled (SR.FD=0).
32 */
33void
34save_fpu(struct task_struct *tsk, struct pt_regs *regs)
35{
36 unsigned long dummy;
37
38 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
39 enable_fpu();
40 asm volatile("sts.l fpul, @-%0\n\t"
41 "sts.l fpscr, @-%0\n\t"
42 "lds %2, fpscr\n\t"
43 "frchg\n\t"
44 "fmov.s fr15, @-%0\n\t"
45 "fmov.s fr14, @-%0\n\t"
46 "fmov.s fr13, @-%0\n\t"
47 "fmov.s fr12, @-%0\n\t"
48 "fmov.s fr11, @-%0\n\t"
49 "fmov.s fr10, @-%0\n\t"
50 "fmov.s fr9, @-%0\n\t"
51 "fmov.s fr8, @-%0\n\t"
52 "fmov.s fr7, @-%0\n\t"
53 "fmov.s fr6, @-%0\n\t"
54 "fmov.s fr5, @-%0\n\t"
55 "fmov.s fr4, @-%0\n\t"
56 "fmov.s fr3, @-%0\n\t"
57 "fmov.s fr2, @-%0\n\t"
58 "fmov.s fr1, @-%0\n\t"
59 "fmov.s fr0, @-%0\n\t"
60 "frchg\n\t"
61 "fmov.s fr15, @-%0\n\t"
62 "fmov.s fr14, @-%0\n\t"
63 "fmov.s fr13, @-%0\n\t"
64 "fmov.s fr12, @-%0\n\t"
65 "fmov.s fr11, @-%0\n\t"
66 "fmov.s fr10, @-%0\n\t"
67 "fmov.s fr9, @-%0\n\t"
68 "fmov.s fr8, @-%0\n\t"
69 "fmov.s fr7, @-%0\n\t"
70 "fmov.s fr6, @-%0\n\t"
71 "fmov.s fr5, @-%0\n\t"
72 "fmov.s fr4, @-%0\n\t"
73 "fmov.s fr3, @-%0\n\t"
74 "fmov.s fr2, @-%0\n\t"
75 "fmov.s fr1, @-%0\n\t"
76 "fmov.s fr0, @-%0\n\t"
77 "lds %3, fpscr\n\t"
78 : "=r" (dummy)
79 : "0" ((char *)(&tsk->thread.fpu.hard.status)),
80 "r" (FPSCR_RCHG),
81 "r" (FPSCR_INIT)
82 : "memory");
83
84 disable_fpu();
85 release_fpu(regs);
86}
87
88static void
89restore_fpu(struct task_struct *tsk)
90{
91 unsigned long dummy;
92
93 enable_fpu();
94 asm volatile("lds %2, fpscr\n\t"
95 "fmov.s @%0+, fr0\n\t"
96 "fmov.s @%0+, fr1\n\t"
97 "fmov.s @%0+, fr2\n\t"
98 "fmov.s @%0+, fr3\n\t"
99 "fmov.s @%0+, fr4\n\t"
100 "fmov.s @%0+, fr5\n\t"
101 "fmov.s @%0+, fr6\n\t"
102 "fmov.s @%0+, fr7\n\t"
103 "fmov.s @%0+, fr8\n\t"
104 "fmov.s @%0+, fr9\n\t"
105 "fmov.s @%0+, fr10\n\t"
106 "fmov.s @%0+, fr11\n\t"
107 "fmov.s @%0+, fr12\n\t"
108 "fmov.s @%0+, fr13\n\t"
109 "fmov.s @%0+, fr14\n\t"
110 "fmov.s @%0+, fr15\n\t"
111 "frchg\n\t"
112 "fmov.s @%0+, fr0\n\t"
113 "fmov.s @%0+, fr1\n\t"
114 "fmov.s @%0+, fr2\n\t"
115 "fmov.s @%0+, fr3\n\t"
116 "fmov.s @%0+, fr4\n\t"
117 "fmov.s @%0+, fr5\n\t"
118 "fmov.s @%0+, fr6\n\t"
119 "fmov.s @%0+, fr7\n\t"
120 "fmov.s @%0+, fr8\n\t"
121 "fmov.s @%0+, fr9\n\t"
122 "fmov.s @%0+, fr10\n\t"
123 "fmov.s @%0+, fr11\n\t"
124 "fmov.s @%0+, fr12\n\t"
125 "fmov.s @%0+, fr13\n\t"
126 "fmov.s @%0+, fr14\n\t"
127 "fmov.s @%0+, fr15\n\t"
128 "frchg\n\t"
129 "lds.l @%0+, fpscr\n\t"
130 "lds.l @%0+, fpul\n\t"
131 : "=r" (dummy)
132 : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
133 : "memory");
134 disable_fpu();
135}
136
137/*
138 * Load the FPU with signalling NaNs. The bit pattern we use has the
139 * property that it represents signalling NaNs whether interpreted as
140 * single or as double precision.
141 */
142
143static void
144fpu_init(void)
145{
146 enable_fpu();
147 asm volatile("lds %0, fpul\n\t"
148 "lds %1, fpscr\n\t"
149 "fsts fpul, fr0\n\t"
150 "fsts fpul, fr1\n\t"
151 "fsts fpul, fr2\n\t"
152 "fsts fpul, fr3\n\t"
153 "fsts fpul, fr4\n\t"
154 "fsts fpul, fr5\n\t"
155 "fsts fpul, fr6\n\t"
156 "fsts fpul, fr7\n\t"
157 "fsts fpul, fr8\n\t"
158 "fsts fpul, fr9\n\t"
159 "fsts fpul, fr10\n\t"
160 "fsts fpul, fr11\n\t"
161 "fsts fpul, fr12\n\t"
162 "fsts fpul, fr13\n\t"
163 "fsts fpul, fr14\n\t"
164 "fsts fpul, fr15\n\t"
165 "frchg\n\t"
166 "fsts fpul, fr0\n\t"
167 "fsts fpul, fr1\n\t"
168 "fsts fpul, fr2\n\t"
169 "fsts fpul, fr3\n\t"
170 "fsts fpul, fr4\n\t"
171 "fsts fpul, fr5\n\t"
172 "fsts fpul, fr6\n\t"
173 "fsts fpul, fr7\n\t"
174 "fsts fpul, fr8\n\t"
175 "fsts fpul, fr9\n\t"
176 "fsts fpul, fr10\n\t"
177 "fsts fpul, fr11\n\t"
178 "fsts fpul, fr12\n\t"
179 "fsts fpul, fr13\n\t"
180 "fsts fpul, fr14\n\t"
181 "fsts fpul, fr15\n\t"
182 "frchg\n\t"
183 "lds %2, fpscr\n\t"
184 : /* no output */
185 : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
186 disable_fpu();
187}
188
189/**
190 * denormal_to_double - Given denormalized float number,
191 * store double float
192 *
193 * @fpu: Pointer to sh_fpu_hard structure
194 * @n: Index to FP register
195 */
196static void
197denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
198{
199 unsigned long du, dl;
200 unsigned long x = fpu->fpul;
201 int exp = 1023 - 126;
202
203 if (x != 0 && (x & 0x7f800000) == 0) {
204 du = (x & 0x80000000);
205 while ((x & 0x00800000) == 0) {
206 x <<= 1;
207 exp--;
208 }
209 x &= 0x007fffff;
210 du |= (exp << 20) | (x >> 3);
211 dl = x << 29;
212
213 fpu->fp_regs[n] = du;
214 fpu->fp_regs[n+1] = dl;
215 }
216}
217
218/**
219 * ieee_fpe_handler - Handle denormalized number exception
220 *
221 * @regs: Pointer to register structure
222 *
223 * Returns 1 when it's handled (should not cause exception).
224 */
225static int
226ieee_fpe_handler (struct pt_regs *regs)
227{
228 unsigned short insn = *(unsigned short *) regs->pc;
229 unsigned short finsn;
230 unsigned long nextpc;
231 int nib[4] = {
232 (insn >> 12) & 0xf,
233 (insn >> 8) & 0xf,
234 (insn >> 4) & 0xf,
235 insn & 0xf};
236
237 if (nib[0] == 0xb ||
238 (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
239 regs->pr = regs->pc + 4;
240
241 if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
242 nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
243 finsn = *(unsigned short *) (regs->pc + 2);
244 } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
245 if (regs->sr & 1)
246 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
247 else
248 nextpc = regs->pc + 4;
249 finsn = *(unsigned short *) (regs->pc + 2);
250 } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
251 if (regs->sr & 1)
252 nextpc = regs->pc + 4;
253 else
254 nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
255 finsn = *(unsigned short *) (regs->pc + 2);
256 } else if (nib[0] == 0x4 && nib[3] == 0xb &&
257 (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
258 nextpc = regs->regs[nib[1]];
259 finsn = *(unsigned short *) (regs->pc + 2);
260 } else if (nib[0] == 0x0 && nib[3] == 0x3 &&
261 (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
262 nextpc = regs->pc + 4 + regs->regs[nib[1]];
263 finsn = *(unsigned short *) (regs->pc + 2);
264 } else if (insn == 0x000b) { /* rts */
265 nextpc = regs->pr;
266 finsn = *(unsigned short *) (regs->pc + 2);
267 } else {
268 nextpc = regs->pc + 2;
269 finsn = insn;
270 }
271
272 if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
273 struct task_struct *tsk = current;
274
275 save_fpu(tsk, regs);
276 if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) {
277 /* FPU error */
278 denormal_to_double (&tsk->thread.fpu.hard,
279 (finsn >> 8) & 0xf);
280 tsk->thread.fpu.hard.fpscr &=
281 ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
282 grab_fpu(regs);
283 restore_fpu(tsk);
284 set_tsk_thread_flag(tsk, TIF_USEDFPU);
285 } else {
286 tsk->thread.trap_no = 11;
287 tsk->thread.error_code = 0;
288 force_sig(SIGFPE, tsk);
289 }
290
291 regs->pc = nextpc;
292 return 1;
293 }
294
295 return 0;
296}
297
298asmlinkage void
299do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
300 struct pt_regs regs)
301{
302 struct task_struct *tsk = current;
303
304 if (ieee_fpe_handler (&regs))
305 return;
306
307 regs.pc += 2;
308 save_fpu(tsk, &regs);
309 tsk->thread.trap_no = 11;
310 tsk->thread.error_code = 0;
311 force_sig(SIGFPE, tsk);
312}
313
314asmlinkage void
315do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
316 unsigned long r7, struct pt_regs regs)
317{
318 struct task_struct *tsk = current;
319
320 grab_fpu(&regs);
321 if (!user_mode(&regs)) {
322 printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
323 return;
324 }
325
326 if (used_math()) {
327 /* Using the FPU again. */
328 restore_fpu(tsk);
329 } else {
330 /* First time FPU user. */
331 fpu_init();
332 set_used_math();
333 }
334 set_tsk_thread_flag(tsk, TIF_USEDFPU);
335}
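The normalization loop in denormal_to_double() is easiest to follow with a concrete value. Here is a standalone user-space replay of the same bit manipulation; it is not part of the diff, and the helper name and stdio output are assumptions made for illustration.

#include <stdio.h>

/* Mirror of the kernel's denormal_to_double() bit logic: normalize a
 * single-precision denormal and emit the equivalent IEEE-754 double
 * as a pair of 32-bit words (du = high word, dl = low word). */
static void denormal_to_double_bits(unsigned long x,
				    unsigned long *du, unsigned long *dl)
{
	int exp = 1023 - 126;	/* double bias minus single denormal exponent */

	*du = x & 0x80000000;			/* keep the sign bit */
	while ((x & 0x00800000) == 0) {		/* shift until the hidden bit appears */
		x <<= 1;
		exp--;
	}
	x &= 0x007fffff;			/* drop the hidden bit */
	*du |= (exp << 20) | (x >> 3);		/* exponent + top 20 mantissa bits */
	*dl = (x << 29) & 0xffffffffUL;		/* remaining 3 bits, zero-padded */
}

int main(void)
{
	unsigned long du, dl;

	/* 0x00000001 is the smallest positive single denormal, 2^-149 */
	denormal_to_double_bits(0x00000001, &du, &dl);
	printf("double bits: %08lx %08lx\n", du, dl);	/* 36a00000 00000000 */
	return 0;
}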
diff --git a/arch/sh/kernel/cpu/sh4/irq_intc2.c b/arch/sh/kernel/cpu/sh4/irq_intc2.c
new file mode 100644
index 000000000000..099ebbf89745
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/irq_intc2.c
@@ -0,0 +1,222 @@
1/*
2 * arch/sh/kernel/cpu/sh4/irq_intc2.c
3 *
4 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
5 *
6 * May be copied or modified under the terms of the GNU General Public
7 * License. See linux/COPYING for more information.
8 *
9 * Interrupt handling for INTC2-based IRQ.
10 *
11 * These are the "new Hitachi style" interrupts, as present on the
12 * Hitachi 7751 and the STM ST40 STB1.
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/irq.h>
18
19#include <asm/system.h>
20#include <asm/io.h>
21#include <asm/machvec.h>
22
23
24struct intc2_data {
25 unsigned char msk_offset;
26 unsigned char msk_shift;
27#ifdef CONFIG_CPU_SUBTYPE_ST40
28 int (*clear_irq) (int);
29#endif
30};
31
32
33static struct intc2_data intc2_data[NR_INTC2_IRQS];
34
35static void enable_intc2_irq(unsigned int irq);
36static void disable_intc2_irq(unsigned int irq);
37
38/* shutdown is same as "disable" */
39#define shutdown_intc2_irq disable_intc2_irq
40
41static void mask_and_ack_intc2(unsigned int);
42static void end_intc2_irq(unsigned int irq);
43
44static unsigned int startup_intc2_irq(unsigned int irq)
45{
46 enable_intc2_irq(irq);
47 return 0; /* never anything pending */
48}
49
50static struct hw_interrupt_type intc2_irq_type = {
51 "INTC2-IRQ",
52 startup_intc2_irq,
53 shutdown_intc2_irq,
54 enable_intc2_irq,
55 disable_intc2_irq,
56 mask_and_ack_intc2,
57 end_intc2_irq
58};
59
60static void disable_intc2_irq(unsigned int irq)
61{
62 int irq_offset = irq - INTC2_FIRST_IRQ;
63 int msk_shift, msk_offset;
64
65	/* Sanity check */
66 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
67 return;
68
69 msk_shift = intc2_data[irq_offset].msk_shift;
70 msk_offset = intc2_data[irq_offset].msk_offset;
71
72 ctrl_outl(1<<msk_shift,
73 INTC2_BASE+INTC2_INTMSK_OFFSET+msk_offset);
74}
75
76static void enable_intc2_irq(unsigned int irq)
77{
78 int irq_offset = irq - INTC2_FIRST_IRQ;
79 int msk_shift, msk_offset;
80
81 /* Sanity check */
82 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
83 return;
84
85 msk_shift = intc2_data[irq_offset].msk_shift;
86 msk_offset = intc2_data[irq_offset].msk_offset;
87
88 ctrl_outl(1<<msk_shift,
89 INTC2_BASE+INTC2_INTMSKCLR_OFFSET+msk_offset);
90}
91
92static void mask_and_ack_intc2(unsigned int irq)
93{
94 disable_intc2_irq(irq);
95}
96
97static void end_intc2_irq(unsigned int irq)
98{
99 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
100 enable_intc2_irq(irq);
101
102#ifdef CONFIG_CPU_SUBTYPE_ST40
103 if (intc2_data[irq - INTC2_FIRST_IRQ].clear_irq)
104 intc2_data[irq - INTC2_FIRST_IRQ].clear_irq (irq);
105#endif
106}
107
108/*
109 * Setup an INTC2 style interrupt.
110 * NOTE: Unlike IPR interrupts, parameters are not shifted by this code,
111 * allowing the use of the numbers straight out of the datasheet.
112 * For example, PIO1, which is INTPRI00[19,16] and INTMSK00[13],
113 * would be set up as:
114 *
115 *	make_intc2_irq(84, 0, 16, 0, 13, priority);
116 *
117 */
118void make_intc2_irq(unsigned int irq,
119 unsigned int ipr_offset, unsigned int ipr_shift,
120 unsigned int msk_offset, unsigned int msk_shift,
121 unsigned int priority)
122{
123 int irq_offset = irq - INTC2_FIRST_IRQ;
124	unsigned long flags;
125 unsigned long ipr;
126
127 if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS))
128 return;
129
130 disable_irq_nosync(irq);
131
132 /* Fill the data we need */
133 intc2_data[irq_offset].msk_offset = msk_offset;
134 intc2_data[irq_offset].msk_shift = msk_shift;
135#ifdef CONFIG_CPU_SUBTYPE_ST40
136 intc2_data[irq_offset].clear_irq = NULL;
137#endif
138
139 /* Set the priority level */
140 local_irq_save(flags);
141
142 ipr=ctrl_inl(INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset);
143 ipr&=~(0xf<<ipr_shift);
144 ipr|=(priority)<<ipr_shift;
145 ctrl_outl(ipr, INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset);
146
147 local_irq_restore(flags);
148
149 irq_desc[irq].handler=&intc2_irq_type;
150
151 disable_intc2_irq(irq);
152}
153
154#ifdef CONFIG_CPU_SUBTYPE_ST40
155
156struct intc2_init {
157 unsigned short irq;
158 unsigned char ipr_offset, ipr_shift;
159 unsigned char msk_offset, msk_shift;
160};
161
162static struct intc2_init intc2_init_data[] __initdata = {
163 {64, 0, 0, 0, 0}, /* PCI serr */
164 {65, 0, 4, 0, 1}, /* PCI err */
165 {66, 0, 4, 0, 2}, /* PCI ad */
166 {67, 0, 4, 0, 3}, /* PCI pwd down */
167 {72, 0, 8, 0, 5}, /* DMAC INT0 */
168 {73, 0, 8, 0, 6}, /* DMAC INT1 */
169 {74, 0, 8, 0, 7}, /* DMAC INT2 */
170 {75, 0, 8, 0, 8}, /* DMAC INT3 */
171 {76, 0, 8, 0, 9}, /* DMAC INT4 */
172 {78, 0, 8, 0, 11}, /* DMAC ERR */
173 {80, 0, 12, 0, 12}, /* PIO0 */
174 {84, 0, 16, 0, 13}, /* PIO1 */
175 {88, 0, 20, 0, 14}, /* PIO2 */
176 {112, 4, 0, 4, 0}, /* Mailbox */
177#ifdef CONFIG_CPU_SUBTYPE_ST40GX1
178 {116, 4, 4, 4, 4}, /* SSC0 */
179 {120, 4, 8, 4, 8}, /* IR Blaster */
180 {124, 4, 12, 4, 12}, /* USB host */
181 {128, 4, 16, 4, 16}, /* Video processor BLITTER */
182 {132, 4, 20, 4, 20}, /* UART0 */
183 {134, 4, 20, 4, 22}, /* UART2 */
184 {136, 4, 24, 4, 24}, /* IO_PIO0 */
185 {140, 4, 28, 4, 28}, /* EMPI */
186 {144, 8, 0, 8, 0}, /* MAFE */
187 {148, 8, 4, 8, 4}, /* PWM */
188 {152, 8, 8, 8, 8}, /* SSC1 */
189 {156, 8, 12, 8, 12}, /* IO_PIO1 */
190 {160, 8, 16, 8, 16}, /* USB target */
191 {164, 8, 20, 8, 20}, /* UART1 */
192 {168, 8, 24, 8, 24}, /* Teletext */
193 {172, 8, 28, 8, 28}, /* VideoSync VTG */
194 {173, 8, 28, 8, 29}, /* VideoSync DVP0 */
195 {174, 8, 28, 8, 30}, /* VideoSync DVP1 */
196#endif
197};
198
199void __init init_IRQ_intc2(void)
200{
201 struct intc2_init *p;
202
203 printk(KERN_ALERT "init_IRQ_intc2\n");
204
205 for (p = intc2_init_data;
206 p<intc2_init_data+ARRAY_SIZE(intc2_init_data);
207 p++) {
208 make_intc2_irq(p->irq, p->ipr_offset, p->ipr_shift,
209			p->msk_offset, p->msk_shift, 13);
210 }
211}
212
213/* Adds a termination callback to the interrupt */
214void intc2_add_clear_irq(int irq, int (*fn)(int))
215{
216 if (irq < INTC2_FIRST_IRQ)
217 return;
218
219 intc2_data[irq - INTC2_FIRST_IRQ].clear_irq = fn;
220}
221
222#endif /* CONFIG_CPU_SUBTYPE_ST40 */
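As a usage reference, wiring up one more INTC2 source follows the same pattern init_IRQ_intc2() uses above. A minimal hypothetical board hook (the function name is an assumption), reusing the PIO1 numbers from the comment on make_intc2_irq() and the priority of 13 that init_IRQ_intc2() passes:

/* Hypothetical board setup fragment: route ST40 PIO1 (IRQ 84) through
 * INTC2. Offsets and shifts are taken unshifted from the datasheet,
 * exactly as make_intc2_irq() expects. */
void __init board_irq_init(void)
{
	/* irq, ipr_offset, ipr_shift, msk_offset, msk_shift, priority */
	make_intc2_irq(84, 0, 16, 0, 13, 13);
}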
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
new file mode 100644
index 000000000000..42427b79697b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -0,0 +1,138 @@
1/*
2 * arch/sh/kernel/cpu/sh4/probe.c
3 *
4 * CPU Subtype Probing for SH-4.
5 *
6 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2003 Richard Curnow
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13
14#include <linux/init.h>
15#include <asm/processor.h>
16#include <asm/cache.h>
17#include <asm/io.h>
18
19int __init detect_cpu_and_cache_system(void)
20{
21 unsigned long pvr, prr, cvr;
22 unsigned long size;
23
24 static unsigned long sizes[16] = {
25 [1] = (1 << 12),
26 [2] = (1 << 13),
27 [4] = (1 << 14),
28 [8] = (1 << 15),
29 [9] = (1 << 16)
30 };
31
32 pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffff;
33 prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
34 cvr = (ctrl_inl(CCN_CVR));
35
36 /*
37 * Setup some sane SH-4 defaults for the icache
38 */
39 cpu_data->icache.way_incr = (1 << 13);
40 cpu_data->icache.entry_shift = 5;
41 cpu_data->icache.entry_mask = 0x1fe0;
42 cpu_data->icache.sets = 256;
43 cpu_data->icache.ways = 1;
44 cpu_data->icache.linesz = L1_CACHE_BYTES;
45
46 /*
47 * And again for the dcache ..
48 */
49 cpu_data->dcache.way_incr = (1 << 14);
50 cpu_data->dcache.entry_shift = 5;
51 cpu_data->dcache.entry_mask = 0x3fe0;
52 cpu_data->dcache.sets = 512;
53 cpu_data->dcache.ways = 1;
54 cpu_data->dcache.linesz = L1_CACHE_BYTES;
55
56 /* Set the FPU flag, virtually all SH-4's have one */
57 cpu_data->flags |= CPU_HAS_FPU;
58
59 /*
60 * Probe the underlying processor version/revision and
61 * adjust cpu_data setup accordingly.
62 */
63 switch (pvr) {
64 case 0x205:
65 cpu_data->type = CPU_SH7750;
66 cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
67 break;
68 case 0x206:
69 cpu_data->type = CPU_SH7750S;
70 cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
71 break;
72 case 0x1100:
73 cpu_data->type = CPU_SH7751;
74 break;
75 case 0x2000:
76 cpu_data->type = CPU_SH73180;
77 cpu_data->icache.ways = 4;
78 cpu_data->dcache.ways = 4;
79 cpu_data->flags &= ~CPU_HAS_FPU;
80 break;
81 case 0x8000:
82 cpu_data->type = CPU_ST40RA;
83 break;
84 case 0x8100:
85 cpu_data->type = CPU_ST40GX1;
86 break;
87 case 0x700:
88 cpu_data->type = CPU_SH4_501;
89 cpu_data->icache.ways = 2;
90 cpu_data->dcache.ways = 2;
91
92 /* No FPU on the SH4-500 series.. */
93 cpu_data->flags &= ~CPU_HAS_FPU;
94 break;
95 case 0x600:
96 cpu_data->type = CPU_SH4_202;
97 cpu_data->icache.ways = 2;
98 cpu_data->dcache.ways = 2;
99 break;
100 case 0x500 ... 0x501:
101 switch (prr) {
102 case 0x10: cpu_data->type = CPU_SH7750R; break;
103 case 0x11: cpu_data->type = CPU_SH7751R; break;
104 case 0x50: cpu_data->type = CPU_SH7760; break;
105 }
106
107 cpu_data->icache.ways = 2;
108 cpu_data->dcache.ways = 2;
109
110 break;
111 default:
112 cpu_data->type = CPU_SH_NONE;
113 break;
114 }
115
116 /*
117 * On anything that's not a direct-mapped cache, look to the CVR
118 * for I/D-cache specifics.
119 */
120 if (cpu_data->icache.ways > 1) {
121 size = sizes[(cvr >> 20) & 0xf];
122 cpu_data->icache.way_incr = (size >> 1);
123 cpu_data->icache.sets = (size >> 6);
124 cpu_data->icache.entry_mask =
125 (cpu_data->icache.way_incr - (1 << 5));
126 }
127
128 if (cpu_data->dcache.ways > 1) {
129 size = sizes[(cvr >> 16) & 0xf];
130 cpu_data->dcache.way_incr = (size >> 1);
131 cpu_data->dcache.sets = (size >> 6);
132 cpu_data->dcache.entry_mask =
133 (cpu_data->dcache.way_incr - (1 << 5));
134 }
135
136 return 0;
137}
138
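The CVR arithmetic at the bottom of detect_cpu_and_cache_system() is terse, so a worked replay helps. The standalone sketch below (user-space, illustrative only) runs the same computation for a hypothetical 2-way cache whose CVR size field decodes to 8, i.e. 32KB per the sizes[] table above.

#include <stdio.h>

/* Worked replay of the CVR-based geometry computation in
 * detect_cpu_and_cache_system(), assuming 2 ways, 32-byte lines,
 * and a CVR size field of 8 (sizes[8] = 1 << 15). */
int main(void)
{
	unsigned long size = 1UL << 15;				/* 32KB */
	unsigned long way_incr = size >> 1;			/* 0x4000 */
	unsigned long sets = size >> 6;				/* 512 */
	unsigned long entry_mask = way_incr - (1 << 5);		/* 0x3fe0 */

	/* sanity: sets * ways * linesz must reproduce the total size */
	printf("way_incr=%#lx sets=%lu entry_mask=%#lx check=%lu\n",
	       way_incr, sets, entry_mask, sets * 2 * 32);
	return 0;
}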
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
new file mode 100644
index 000000000000..8437ea7430fe
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -0,0 +1,453 @@
1/*
2 * arch/sh/kernel/cpu/sh4/sq.c
3 *
4 * General management API for SH-4 integrated Store Queues
5 *
6 * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
7 * Copyright (C) 2001, 2002 M. R. Brown
8 *
9 * Some of this code has been adopted directly from the old arch/sh/mm/sq.c
10 * hack that was part of the LinuxDC project. For all intents and purposes,
11 * this is a completely new interface that really doesn't have much in common
12 * with the old zone-based approach at all. In fact, it's only listed here for
13 * general completeness.
14 *
15 * This file is subject to the terms and conditions of the GNU General Public
16 * License. See the file "COPYING" in the main directory of this archive
17 * for more details.
18 */
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/config.h>
23#include <linux/slab.h>
24#include <linux/list.h>
25#include <linux/proc_fs.h>
26#include <linux/miscdevice.h>
27#include <linux/vmalloc.h>
28
29#include <asm/io.h>
30#include <asm/page.h>
31#include <asm/mmu_context.h>
32#include <asm/cpu/sq.h>
33
34static LIST_HEAD(sq_mapping_list);
35static DEFINE_SPINLOCK(sq_mapping_lock);
36
37/**
38 * sq_flush - Flush (prefetch) the store queue cache
39 * @addr: the store queue address to flush
40 *
41 * Executes a prefetch instruction on the specified store queue cache,
42 * so that the cached data is written to physical memory.
43 */
44inline void sq_flush(void *addr)
45{
46 __asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
47}
48
49/**
50 * sq_flush_range - Flush (prefetch) a specific SQ range
51 * @start: the store queue address to start flushing from
52 * @len: the length to flush
53 *
54 * Flushes the store queue cache from @start to @start + @len in a
55 * linear fashion.
56 */
57void sq_flush_range(unsigned long start, unsigned int len)
58{
59 volatile unsigned long *sq = (unsigned long *)start;
60 unsigned long dummy;
61
62 /* Flush the queues */
63 for (len >>= 5; len--; sq += 8)
64 sq_flush((void *)sq);
65
66 /* Wait for completion */
67 dummy = ctrl_inl(P4SEG_STORE_QUE);
68
69 ctrl_outl(0, P4SEG_STORE_QUE + 0);
70 ctrl_outl(0, P4SEG_STORE_QUE + 8);
71}
72
73static struct sq_mapping *__sq_alloc_mapping(unsigned long virt, unsigned long phys, unsigned long size, const char *name)
74{
75 struct sq_mapping *map;
76
77 if (virt + size > SQ_ADDRMAX)
78 return ERR_PTR(-ENOSPC);
79
80 map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL);
81 if (!map)
82 return ERR_PTR(-ENOMEM);
83
84 INIT_LIST_HEAD(&map->list);
85
86 map->sq_addr = virt;
87 map->addr = phys;
88 map->size = size + 1;
89 map->name = name;
90
91 list_add(&map->list, &sq_mapping_list);
92
93 return map;
94}
95
96static unsigned long __sq_get_next_addr(void)
97{
98 if (!list_empty(&sq_mapping_list)) {
99 struct list_head *pos, *tmp;
100
101 /*
102 * Read one off the list head, as it will have the highest
103 * mapped allocation. Set the next one up right above it.
104 *
105 * This is somewhat sub-optimal, as we don't look at
106		 * gaps between allocations or anything lower than the
107 * highest-level allocation.
108 *
109 * However, in the interest of performance and the general
110 * lack of desire to do constant list rebalancing, we don't
111 * worry about it.
112 */
113 list_for_each_safe(pos, tmp, &sq_mapping_list) {
114 struct sq_mapping *entry;
115
116 entry = list_entry(pos, typeof(*entry), list);
117
118 return entry->sq_addr + entry->size;
119 }
120 }
121
122 return P4SEG_STORE_QUE;
123}
124
125/**
126 * __sq_remap - Perform a translation from the SQ to a phys addr
127 * @map: sq mapping containing phys and store queue addresses.
128 *
129 * Maps the store queue address specified in the mapping to the physical
130 * address specified in the mapping.
131 */
132static struct sq_mapping *__sq_remap(struct sq_mapping *map)
133{
134 unsigned long flags, pteh, ptel;
135 struct vm_struct *vma;
136 pgprot_t pgprot;
137
138 /*
139 * Without an MMU (or with it turned off), this is much more
140 * straightforward, as we can just load up each queue's QACR with
141 * the physical address appropriately masked.
142 */
143
144 ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
145 ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
146
147#ifdef CONFIG_MMU
148 /*
149 * With an MMU on the other hand, things are slightly more involved.
150 * Namely, we have to have a direct mapping between the SQ addr and
151 * the associated physical address in the UTLB by way of setting up
152 * a virt<->phys translation by hand. We do this by simply specifying
153 * the SQ addr in UTLB.VPN and the associated physical address in
154 * UTLB.PPN.
155 *
156 * Notably, even though this is a special case translation, and some
157 * of the configuration bits are meaningless, we're still required
158 * to have a valid ASID context in PTEH.
159 *
160 * We could also probably get by without explicitly setting PTEA, but
161 * we do it here just for good measure.
162 */
163 spin_lock_irqsave(&sq_mapping_lock, flags);
164
165 pteh = map->sq_addr;
166 ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH);
167
168 ptel = map->addr & PAGE_MASK;
169 ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA);
170
171 pgprot = pgprot_noncached(PAGE_KERNEL);
172
173 ptel &= _PAGE_FLAGS_HARDWARE_MASK;
174 ptel |= pgprot_val(pgprot);
175 ctrl_outl(ptel, MMU_PTEL);
176
177 __asm__ __volatile__ ("ldtlb" : : : "memory");
178
179 spin_unlock_irqrestore(&sq_mapping_lock, flags);
180
181 /*
182 * Next, we need to map ourselves in the kernel page table, so that
183 * future accesses after a TLB flush will be handled when we take a
184 * page fault.
185 *
186 * Theoretically we could just do this directly and not worry about
187 * setting up the translation by hand ahead of time, but for the
188 * cases where we want a one-shot SQ mapping followed by a quick
189 * writeout before we hit the TLB flush, we do it anyways. This way
190 * we at least save ourselves the initial page fault overhead.
191 */
192 vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
193 if (!vma)
194 return ERR_PTR(-ENOMEM);
195
196 vma->phys_addr = map->addr;
197
198 if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
199 map->size, pgprot_val(pgprot))) {
200 vunmap(vma->addr);
201 return NULL;
202 }
203#endif /* CONFIG_MMU */
204
205 return map;
206}
207
208/**
209 * sq_remap - Map a physical address through the Store Queues
210 * @phys: Physical address of mapping.
211 * @size: Length of mapping.
212 * @name: User invoking mapping.
213 *
214 * Remaps the physical address @phys through the next available store queue
215 * address of @size length. @name is logged at boot time as well as through
216 * the procfs interface.
217 *
218 * A pre-allocated and filled sq_mapping pointer is returned, and must be
219 * cleaned up with a call to sq_unmap() when the user is done with the
220 * mapping.
221 */
222struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name)
223{
224 struct sq_mapping *map;
225 unsigned long virt, end;
226 unsigned int psz;
227
228 /* Don't allow wraparound or zero size */
229 end = phys + size - 1;
230 if (!size || end < phys)
231 return NULL;
232 /* Don't allow anyone to remap normal memory.. */
233 if (phys < virt_to_phys(high_memory))
234 return NULL;
235
236 phys &= PAGE_MASK;
237
238 size = PAGE_ALIGN(end + 1) - phys;
239 virt = __sq_get_next_addr();
240 psz = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
241 map = __sq_alloc_mapping(virt, phys, size, name);
242
243 printk("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
244 map->name ? map->name : "???",
245 psz, psz == 1 ? " " : "s",
246 map->sq_addr, map->addr);
247
248 return __sq_remap(map);
249}
250
251/**
252 * sq_unmap - Unmap a Store Queue allocation
253 * @map: Pre-allocated Store Queue mapping.
254 *
255 * Unmaps the store queue allocation @map that was previously created by
256 * sq_remap(). Also frees up the pte that was previously inserted into
257 * the kernel page table and discards the UTLB translation.
258 */
259void sq_unmap(struct sq_mapping *map)
260{
261 if (map->sq_addr > (unsigned long)high_memory)
262 vfree((void *)(map->sq_addr & PAGE_MASK));
263
264 list_del(&map->list);
265 kfree(map);
266}
267
268/**
269 * sq_clear - Clear a store queue range
270 * @addr: Address to start clearing from.
271 * @len: Length to clear.
272 *
273 * A quick zero-fill implementation for clearing out memory that has been
274 * remapped through the store queues.
275 */
276void sq_clear(unsigned long addr, unsigned int len)
277{
278 int i;
279
280 /* Clear out both queues linearly */
281 for (i = 0; i < 8; i++) {
282 ctrl_outl(0, addr + i + 0);
283 ctrl_outl(0, addr + i + 8);
284 }
285
286 sq_flush_range(addr, len);
287}
288
289/**
290 * sq_vma_unmap - Unmap a VMA range
291 * @area: VMA containing range.
292 * @addr: Start of range.
293 * @len: Length of range.
294 *
295 * Searches the sq_mapping_list for a mapping matching the sq addr @addr,
296 * and subsequently frees up the entry. Further cleanup is done by generic
297 * code.
298 */
299static void sq_vma_unmap(struct vm_area_struct *area,
300 unsigned long addr, size_t len)
301{
302 struct list_head *pos, *tmp;
303
304 list_for_each_safe(pos, tmp, &sq_mapping_list) {
305 struct sq_mapping *entry;
306
307 entry = list_entry(pos, typeof(*entry), list);
308
309 if (entry->sq_addr == addr) {
310 /*
311 * We could probably get away without doing the tlb flush
312 * here, as generic code should take care of most of this
313 * when unmapping the rest of the VMA range for us. Leave
314 * it in for added sanity for the time being..
315 */
316 __flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK);
317
318 list_del(&entry->list);
319 kfree(entry);
320
321 return;
322 }
323 }
324}
325
326/**
327 * sq_vma_sync - Sync a VMA range
328 * @area: VMA containing range.
329 * @start: Start of range.
330 * @len: Length of range.
331 * @flags: Additional flags.
332 *
333 * Synchronizes an sq mapped range by flushing the store queue cache for
334 * the duration of the mapping.
335 *
336 * Used internally for user mappings, which must use msync() to prefetch
337 * the store queue cache.
338 */
339static int sq_vma_sync(struct vm_area_struct *area,
340 unsigned long start, size_t len, unsigned int flags)
341{
342 sq_flush_range(start, len);
343
344 return 0;
345}
346
347static struct vm_operations_struct sq_vma_ops = {
348 .unmap = sq_vma_unmap,
349 .sync = sq_vma_sync,
350};
351
352/**
353 * sq_mmap - mmap() for /dev/cpu/sq
354 * @file: unused.
355 * @vma: VMA to remap.
356 *
357 * Remap the specified vma @vma through the store queues, and setup associated
358 * information for the new mapping. Also build up the page tables for the new
359 * area.
360 */
361static int sq_mmap(struct file *file, struct vm_area_struct *vma)
362{
363 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
364 unsigned long size = vma->vm_end - vma->vm_start;
365 struct sq_mapping *map;
366
367 /*
368 * We're not interested in any arbitrary virtual address that has
369 * been stuck in the VMA, as we already know what addresses we
370 * want. Save off the size, and reposition the VMA to begin at
371 * the next available sq address.
372 */
373 vma->vm_start = __sq_get_next_addr();
374 vma->vm_end = vma->vm_start + size;
375
376 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
377
378 vma->vm_flags |= VM_IO | VM_RESERVED;
379
380 map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace");
381
382 if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT,
383 size, vma->vm_page_prot))
384 return -EAGAIN;
385
386 vma->vm_ops = &sq_vma_ops;
387
388 return 0;
389}
390
391#ifdef CONFIG_PROC_FS
392static int sq_mapping_read_proc(char *buf, char **start, off_t off,
393 int len, int *eof, void *data)
394{
395 struct list_head *pos;
396 char *p = buf;
397
398 list_for_each_prev(pos, &sq_mapping_list) {
399 struct sq_mapping *entry;
400
401 entry = list_entry(pos, typeof(*entry), list);
402
403 p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr,
404 entry->sq_addr + entry->size - 1, entry->addr,
405 entry->name);
406 }
407
408 return p - buf;
409}
410#endif
411
412static struct file_operations sq_fops = {
413 .owner = THIS_MODULE,
414 .mmap = sq_mmap,
415};
416
417static struct miscdevice sq_dev = {
418 .minor = STORE_QUEUE_MINOR,
419 .name = "sq",
420 .devfs_name = "cpu/sq",
421 .fops = &sq_fops,
422};
423
424static int __init sq_api_init(void)
425{
426 printk(KERN_NOTICE "sq: Registering store queue API.\n");
427
428#ifdef CONFIG_PROC_FS
429 create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0);
430#endif
431
432 return misc_register(&sq_dev);
433}
434
435static void __exit sq_api_exit(void)
436{
437 misc_deregister(&sq_dev);
438}
439
440module_init(sq_api_init);
441module_exit(sq_api_exit);
442
443MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
444MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
445MODULE_LICENSE("GPL");
446MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR);
447
448EXPORT_SYMBOL(sq_remap);
449EXPORT_SYMBOL(sq_unmap);
450EXPORT_SYMBOL(sq_clear);
451EXPORT_SYMBOL(sq_flush);
452EXPORT_SYMBOL(sq_flush_range);
453
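Putting the exported API together, the intended lifecycle is sq_remap(), then writes (or sq_clear()), then sq_unmap(). Below is a minimal hypothetical in-kernel caller; PHYS_ADDR, WINDOW_SIZE, and the function name are made-up values for illustration, not anything from this diff.

/* Hypothetical driver fragment: zero a device memory window through
 * the store queues using the API exported above. PHYS_ADDR and
 * WINDOW_SIZE are assumptions. */
#define PHYS_ADDR	0x10000000UL
#define WINDOW_SIZE	(64 * 1024)

static int __init sq_example_init(void)
{
	struct sq_mapping *map;

	map = sq_remap(PHYS_ADDR, WINDOW_SIZE, "example");
	if (map == NULL || IS_ERR(map))
		return -ENOMEM;

	sq_clear(map->sq_addr, WINDOW_SIZE);	/* zero-fill via the SQs */
	sq_unmap(map);
	return 0;
}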