-rw-r--r--  arch/sparc/kernel/Makefile      5
-rw-r--r--  arch/sparc/kernel/una_asm.S   153
-rw-r--r--  arch/sparc/kernel/unaligned.c 252
3 files changed, 203 insertions, 207 deletions
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index e795f282dece..bf1b15d3f6f5 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.62 2000/12/15 00:41:17 davem Exp $
+#
 # Makefile for the linux kernel.
 #
 
@@ -12,7 +12,8 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
 	    sys_sparc.o sunos_asm.o systbls.o \
 	    time.o windows.o cpu.o devices.o sclow.o \
 	    tadpole.o tick14.o ptrace.o sys_solaris.o \
-	    unaligned.o muldiv.o semaphore.o prom.o of_device.o devres.o
+	    unaligned.o una_asm.o muldiv.o semaphore.o \
+	    prom.o of_device.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
 
diff --git a/arch/sparc/kernel/una_asm.S b/arch/sparc/kernel/una_asm.S
new file mode 100644
index 000000000000..8cc03458eb7e
--- /dev/null
+++ b/arch/sparc/kernel/una_asm.S
@@ -0,0 +1,153 @@
+/* una_asm.S: Kernel unaligned trap assembler helpers.
+ *
+ * Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/errno.h>
+
+	.text
+
+retl_efault:
+	retl
+	 mov	-EFAULT, %o0
+
+	/* int __do_int_store(unsigned long *dst_addr, int size,
+	 *                    unsigned long *src_val)
+	 *
+	 * %o0 = dest_addr
+	 * %o1 = size
+	 * %o2 = src_val
+	 *
+	 * Return '0' on success, -EFAULT on failure.
+	 */
+	.globl	__do_int_store
+__do_int_store:
+	ld	[%o2], %g1
+	cmp	%o1, 2
+	be	2f
+	 cmp	%o1, 4
+	be	1f
+	 srl	%g1, 24, %g2
+	srl	%g1, 16, %g7
+4:	stb	%g2, [%o0]
+	srl	%g1, 8, %g2
+5:	stb	%g7, [%o0 + 1]
+	ld	[%o2 + 4], %g7
+6:	stb	%g2, [%o0 + 2]
+	srl	%g7, 24, %g2
+7:	stb	%g1, [%o0 + 3]
+	srl	%g7, 16, %g1
+8:	stb	%g2, [%o0 + 4]
+	srl	%g7, 8, %g2
+9:	stb	%g1, [%o0 + 5]
+10:	stb	%g2, [%o0 + 6]
+	b	0f
+11:	 stb	%g7, [%o0 + 7]
+1:	srl	%g1, 16, %g7
+12:	stb	%g2, [%o0]
+	srl	%g1, 8, %g2
+13:	stb	%g7, [%o0 + 1]
+14:	stb	%g2, [%o0 + 2]
+	b	0f
+15:	 stb	%g1, [%o0 + 3]
+2:	srl	%g1, 8, %g2
+16:	stb	%g2, [%o0]
+17:	stb	%g1, [%o0 + 1]
+0:	retl
+	 mov	0, %o0
+
+	.section __ex_table,#alloc
+	.word	4b, retl_efault
+	.word	5b, retl_efault
+	.word	6b, retl_efault
+	.word	7b, retl_efault
+	.word	8b, retl_efault
+	.word	9b, retl_efault
+	.word	10b, retl_efault
+	.word	11b, retl_efault
+	.word	12b, retl_efault
+	.word	13b, retl_efault
+	.word	14b, retl_efault
+	.word	15b, retl_efault
+	.word	16b, retl_efault
+	.word	17b, retl_efault
+	.previous
+
+	/* int do_int_load(unsigned long *dest_reg, int size,
+	 *                 unsigned long *saddr, int is_signed)
+	 *
+	 * %o0 = dest_reg
+	 * %o1 = size
+	 * %o2 = saddr
+	 * %o3 = is_signed
+	 *
+	 * Return '0' on success, -EFAULT on failure.
+	 */
+	.globl	do_int_load
+do_int_load:
+	cmp	%o1, 8
+	be	9f
+	 cmp	%o1, 4
+	be	6f
+4:	 ldub	[%o2], %g1
+5:	ldub	[%o2 + 1], %g2
+	sll	%g1, 8, %g1
+	tst	%o3
+	be	3f
+	 or	%g1, %g2, %g1
+	sll	%g1, 16, %g1
+	sra	%g1, 16, %g1
+3:	b	0f
+	 st	%g1, [%o0]
+6:	ldub	[%o2 + 1], %g2
+	sll	%g1, 24, %g1
+7:	ldub	[%o2 + 2], %g7
+	sll	%g2, 16, %g2
+8:	ldub	[%o2 + 3], %g3
+	sll	%g7, 8, %g7
+	or	%g3, %g2, %g3
+	or	%g7, %g3, %g7
+	or	%g1, %g7, %g1
+	b	0f
+	 st	%g1, [%o0]
+9:	ldub	[%o2], %g1
+10:	ldub	[%o2 + 1], %g2
+	sll	%g1, 24, %g1
+11:	ldub	[%o2 + 2], %g7
+	sll	%g2, 16, %g2
+12:	ldub	[%o2 + 3], %g3
+	sll	%g7, 8, %g7
+	or	%g1, %g2, %g1
+	or	%g7, %g3, %g7
+	or	%g1, %g7, %g7
+13:	ldub	[%o2 + 4], %g1
+	st	%g7, [%o0]
+14:	ldub	[%o2 + 5], %g2
+	sll	%g1, 24, %g1
+15:	ldub	[%o2 + 6], %g7
+	sll	%g2, 16, %g2
+16:	ldub	[%o2 + 7], %g3
+	sll	%g7, 8, %g7
+	or	%g1, %g2, %g1
+	or	%g7, %g3, %g7
+	or	%g1, %g7, %g7
+	st	%g7, [%o0 + 4]
+0:	retl
+	 mov	0, %o0
+
+	.section __ex_table,#alloc
+	.word	4b, retl_efault
+	.word	5b, retl_efault
+	.word	6b, retl_efault
+	.word	7b, retl_efault
+	.word	8b, retl_efault
+	.word	9b, retl_efault
+	.word	10b, retl_efault
+	.word	11b, retl_efault
+	.word	12b, retl_efault
+	.word	13b, retl_efault
+	.word	14b, retl_efault
+	.word	15b, retl_efault
+	.word	16b, retl_efault
+	.previous
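
(Aside, not part of the patch: do_int_load above just assembles a big-endian value one byte at a time and writes it into the saved register slot, falling back to retl_efault through the __ex_table entries if any byte access faults. A rough C sketch of the same logic, with an invented function name, would look like this:)

	static int do_int_load_c_sketch(unsigned long *dest_reg, int size,
					const unsigned char *saddr, int is_signed)
	{
		unsigned long val = 0;
		int i, first = (size > 4) ? 4 : size;

		/* SPARC is big-endian: most significant byte comes first. */
		for (i = 0; i < first; i++)
			val = (val << 8) | saddr[i];

		if (is_signed && size == 2)
			val = (unsigned long)(long)(short)val;	/* sign-extend halfword */

		dest_reg[0] = val;

		if (size == 8) {		/* ldd: assemble the second 32-bit word */
			val = 0;
			for (i = 4; i < 8; i++)
				val = (val << 8) | saddr[i];
			dest_reg[1] = val;
		}
		return 0;	/* the real helper returns -EFAULT on a faulting access */
	}
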
diff --git a/arch/sparc/kernel/unaligned.c b/arch/sparc/kernel/unaligned.c
index a6330fbc9dd9..33857be16661 100644
--- a/arch/sparc/kernel/unaligned.c
+++ b/arch/sparc/kernel/unaligned.c
@@ -175,157 +175,31 @@ static void unaligned_panic(char *str)
 	panic(str);
 }
 
-#define do_integer_load(dest_reg, size, saddr, is_signed, errh) ({ \
-__asm__ __volatile__ ( \
-	"cmp %1, 8\n\t" \
-	"be 9f\n\t" \
-	" cmp %1, 4\n\t" \
-	"be 6f\n" \
-"4:\t" " ldub [%2], %%l1\n" \
-"5:\t" "ldub [%2 + 1], %%l2\n\t" \
-	"sll %%l1, 8, %%l1\n\t" \
-	"tst %3\n\t" \
-	"be 3f\n\t" \
-	" add %%l1, %%l2, %%l1\n\t" \
-	"sll %%l1, 16, %%l1\n\t" \
-	"sra %%l1, 16, %%l1\n" \
-"3:\t" "b 0f\n\t" \
-	" st %%l1, [%0]\n" \
-"6:\t" "ldub [%2 + 1], %%l2\n\t" \
-	"sll %%l1, 24, %%l1\n" \
-"7:\t" "ldub [%2 + 2], %%g7\n\t" \
-	"sll %%l2, 16, %%l2\n" \
-"8:\t" "ldub [%2 + 3], %%g1\n\t" \
-	"sll %%g7, 8, %%g7\n\t" \
-	"or %%l1, %%l2, %%l1\n\t" \
-	"or %%g7, %%g1, %%g7\n\t" \
-	"or %%l1, %%g7, %%l1\n\t" \
-	"b 0f\n\t" \
-	" st %%l1, [%0]\n" \
-"9:\t" "ldub [%2], %%l1\n" \
-"10:\t" "ldub [%2 + 1], %%l2\n\t" \
-	"sll %%l1, 24, %%l1\n" \
-"11:\t" "ldub [%2 + 2], %%g7\n\t" \
-	"sll %%l2, 16, %%l2\n" \
-"12:\t" "ldub [%2 + 3], %%g1\n\t" \
-	"sll %%g7, 8, %%g7\n\t" \
-	"or %%l1, %%l2, %%l1\n\t" \
-	"or %%g7, %%g1, %%g7\n\t" \
-	"or %%l1, %%g7, %%g7\n" \
-"13:\t" "ldub [%2 + 4], %%l1\n\t" \
-	"st %%g7, [%0]\n" \
-"14:\t" "ldub [%2 + 5], %%l2\n\t" \
-	"sll %%l1, 24, %%l1\n" \
-"15:\t" "ldub [%2 + 6], %%g7\n\t" \
-	"sll %%l2, 16, %%l2\n" \
-"16:\t" "ldub [%2 + 7], %%g1\n\t" \
-	"sll %%g7, 8, %%g7\n\t" \
-	"or %%l1, %%l2, %%l1\n\t" \
-	"or %%g7, %%g1, %%g7\n\t" \
-	"or %%l1, %%g7, %%g7\n\t" \
-	"st %%g7, [%0 + 4]\n" \
-"0:\n\n\t" \
-	".section __ex_table,#alloc\n\t" \
-	".word 4b, " #errh "\n\t" \
-	".word 5b, " #errh "\n\t" \
-	".word 6b, " #errh "\n\t" \
-	".word 7b, " #errh "\n\t" \
-	".word 8b, " #errh "\n\t" \
-	".word 9b, " #errh "\n\t" \
-	".word 10b, " #errh "\n\t" \
-	".word 11b, " #errh "\n\t" \
-	".word 12b, " #errh "\n\t" \
-	".word 13b, " #errh "\n\t" \
-	".word 14b, " #errh "\n\t" \
-	".word 15b, " #errh "\n\t" \
-	".word 16b, " #errh "\n\n\t" \
-	".previous\n\t" \
-	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed) \
-	: "l1", "l2", "g7", "g1", "cc"); \
-})
-
-#define store_common(dst_addr, size, src_val, errh) ({ \
-__asm__ __volatile__ ( \
-	"ld [%2], %%l1\n" \
-	"cmp %1, 2\n\t" \
-	"be 2f\n\t" \
-	" cmp %1, 4\n\t" \
-	"be 1f\n\t" \
-	" srl %%l1, 24, %%l2\n\t" \
-	"srl %%l1, 16, %%g7\n" \
-"4:\t" "stb %%l2, [%0]\n\t" \
-	"srl %%l1, 8, %%l2\n" \
-"5:\t" "stb %%g7, [%0 + 1]\n\t" \
-	"ld [%2 + 4], %%g7\n" \
-"6:\t" "stb %%l2, [%0 + 2]\n\t" \
-	"srl %%g7, 24, %%l2\n" \
-"7:\t" "stb %%l1, [%0 + 3]\n\t" \
-	"srl %%g7, 16, %%l1\n" \
-"8:\t" "stb %%l2, [%0 + 4]\n\t" \
-	"srl %%g7, 8, %%l2\n" \
-"9:\t" "stb %%l1, [%0 + 5]\n" \
-"10:\t" "stb %%l2, [%0 + 6]\n\t" \
-	"b 0f\n" \
-"11:\t" " stb %%g7, [%0 + 7]\n" \
-"1:\t" "srl %%l1, 16, %%g7\n" \
-"12:\t" "stb %%l2, [%0]\n\t" \
-	"srl %%l1, 8, %%l2\n" \
-"13:\t" "stb %%g7, [%0 + 1]\n" \
-"14:\t" "stb %%l2, [%0 + 2]\n\t" \
-	"b 0f\n" \
-"15:\t" " stb %%l1, [%0 + 3]\n" \
-"2:\t" "srl %%l1, 8, %%l2\n" \
-"16:\t" "stb %%l2, [%0]\n" \
-"17:\t" "stb %%l1, [%0 + 1]\n" \
-"0:\n\n\t" \
-	".section __ex_table,#alloc\n\t" \
-	".word 4b, " #errh "\n\t" \
-	".word 5b, " #errh "\n\t" \
-	".word 6b, " #errh "\n\t" \
-	".word 7b, " #errh "\n\t" \
-	".word 8b, " #errh "\n\t" \
-	".word 9b, " #errh "\n\t" \
-	".word 10b, " #errh "\n\t" \
-	".word 11b, " #errh "\n\t" \
-	".word 12b, " #errh "\n\t" \
-	".word 13b, " #errh "\n\t" \
-	".word 14b, " #errh "\n\t" \
-	".word 15b, " #errh "\n\t" \
-	".word 16b, " #errh "\n\t" \
-	".word 17b, " #errh "\n\n\t" \
-	".previous\n\t" \
-	: : "r" (dst_addr), "r" (size), "r" (src_val) \
-	: "l1", "l2", "g7", "g1", "cc"); \
-})
-
-#define do_integer_store(reg_num, size, dst_addr, regs, errh) ({ \
-	unsigned long *src_val; \
-	static unsigned long zero[2] = { 0, }; \
-	\
-	if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
-	else { \
-		src_val = &zero[0]; \
-		if (size == 8) \
-			zero[1] = fetch_reg(1, regs); \
-	} \
-	store_common(dst_addr, size, src_val, errh); \
-})
+/* una_asm.S */
+extern int do_int_load(unsigned long *dest_reg, int size,
+		       unsigned long *saddr, int is_signed);
+extern int __do_int_store(unsigned long *dst_addr, int size,
+			  unsigned long *src_val);
+
+static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+			struct pt_regs *regs)
+{
+	unsigned long zero[2] = { 0, 0 };
+	unsigned long *src_val;
+
+	if (reg_num)
+		src_val = fetch_reg_addr(reg_num, regs);
+	else {
+		src_val = &zero[0];
+		if (size == 8)
+			zero[1] = fetch_reg(1, regs);
+	}
+	return __do_int_store(dst_addr, size, src_val);
+}
 
 extern void smp_capture(void);
 extern void smp_release(void);
 
-#define do_atomic(srcdest_reg, mem, errh) ({ \
-	unsigned long flags, tmp; \
-	\
-	smp_capture(); \
-	local_irq_save(flags); \
-	tmp = *srcdest_reg; \
-	do_integer_load(srcdest_reg, 4, mem, 0, errh); \
-	store_common(mem, 4, &tmp, errh); \
-	local_irq_restore(flags); \
-	smp_release(); \
-})
-
 static inline void advance(struct pt_regs *regs)
 {
 	regs->pc = regs->npc;
@@ -342,9 +216,7 @@ static inline int ok_for_kernel(unsigned int insn)
 	return !floating_point_load_or_store_p(insn);
 }
 
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
-
-void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
 	unsigned long g2 = regs->u_regs [UREG_G2];
 	unsigned long fixup = search_extables_range(regs->pc, &g2);
@@ -379,48 +251,34 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 		printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
 		       regs->pc);
 		unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
-
-		__asm__ __volatile__ ("\n"
-"kernel_unaligned_trap_fault:\n\t"
-		"mov %0, %%o0\n\t"
-		"call kernel_mna_trap_fault\n\t"
-		" mov %1, %%o1\n\t"
-		:
-		: "r" (regs), "r" (insn)
-		: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
-		  "g1", "g2", "g3", "g4", "g5", "g7", "cc");
 	} else {
 		unsigned long addr = compute_effective_address(regs, insn);
+		int err;
 
 #ifdef DEBUG_MNA
 		printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
 		       regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
 #endif
-		switch(dir) {
+		switch (dir) {
 		case load:
-			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
-					size, (unsigned long *) addr,
-					decode_signedness(insn),
-					kernel_unaligned_trap_fault);
+			err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
+							 regs),
+					  size, (unsigned long *) addr,
+					  decode_signedness(insn));
 			break;
 
 		case store:
-			do_integer_store(((insn>>25)&0x1f), size,
-					 (unsigned long *) addr, regs,
-					 kernel_unaligned_trap_fault);
+			err = do_int_store(((insn>>25)&0x1f), size,
+					   (unsigned long *) addr, regs);
 			break;
-#if 0 /* unsupported */
-		case both:
-			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
-				  (unsigned long *) addr,
-				  kernel_unaligned_trap_fault);
-			break;
-#endif
 		default:
 			panic("Impossible kernel unaligned trap.");
 			/* Not reached... */
 		}
-		advance(regs);
+		if (err)
+			kernel_mna_trap_fault(regs, insn);
+		else
+			advance(regs);
 	}
 }
 
@@ -459,9 +317,7 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
 	return 0;
 }
 
-void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault");
-
-void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
 	siginfo_t info;
 
@@ -485,7 +341,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 	if(!ok_for_user(regs, insn, dir)) {
 		goto kill_user;
 	} else {
-		int size = decode_access_size(insn);
+		int err, size = decode_access_size(insn);
 		unsigned long addr;
 
 		if(floating_point_load_or_store_p(insn)) {
@@ -496,48 +352,34 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 		addr = compute_effective_address(regs, insn);
 		switch(dir) {
 		case load:
-			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
-					size, (unsigned long *) addr,
-					decode_signedness(insn),
-					user_unaligned_trap_fault);
+			err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
+							 regs),
+					  size, (unsigned long *) addr,
+					  decode_signedness(insn));
 			break;
 
 		case store:
-			do_integer_store(((insn>>25)&0x1f), size,
-					 (unsigned long *) addr, regs,
-					 user_unaligned_trap_fault);
+			err = do_int_store(((insn>>25)&0x1f), size,
+					   (unsigned long *) addr, regs);
 			break;
 
 		case both:
-#if 0 /* unsupported */
-			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
-				  (unsigned long *) addr,
-				  user_unaligned_trap_fault);
-#else
 			/*
 			 * This was supported in 2.4. However, we question
 			 * the value of SWAP instruction across word boundaries.
 			 */
 			printk("Unaligned SWAP unsupported.\n");
-			goto kill_user;
-#endif
+			err = -EFAULT;
 			break;
 
 		default:
 			unaligned_panic("Impossible user unaligned trap.");
-
-			__asm__ __volatile__ ("\n"
-"user_unaligned_trap_fault:\n\t"
-			"mov %0, %%o0\n\t"
-			"call user_mna_trap_fault\n\t"
-			" mov %1, %%o1\n\t"
-			:
-			: "r" (regs), "r" (insn)
-			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
-			  "g1", "g2", "g3", "g4", "g5", "g7", "cc");
 			goto out;
 		}
-		advance(regs);
+		if (err)
+			goto kill_user;
+		else
+			advance(regs);
 		goto out;
 	}
 
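
(Aside, not part of the patch: both trap handlers pull the destination register number out of bits 25..29 of the instruction word with the open-coded "(insn>>25)&0x1f" before handing it to do_int_load()/do_int_store(). A hypothetical helper, named here only for illustration, would spell that out as:)

	/* Illustrative only: the rd field of a SPARC load/store opcode
	 * occupies bits 25..29 of the instruction word.
	 */
	static inline unsigned int decode_rd(unsigned int insn)
	{
		return (insn >> 25) & 0x1f;
	}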