Diffstat (limited to 'arch/sparc64/lib')
-rw-r--r--  arch/sparc64/lib/Makefile            |  2
-rw-r--r--  arch/sparc64/lib/VISsave.S           |  8
-rw-r--r--  arch/sparc64/lib/dec_and_lock.S      | 80
-rw-r--r--  arch/sparc64/lib/strncpy_from_user.S | 16
-rw-r--r--  arch/sparc64/lib/user_fixup.c        | 63
5 files changed, 40 insertions(+), 129 deletions(-)
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
index 4e18989bd602..a0ded5c5aa5c 100644
--- a/arch/sparc64/lib/VISsave.S
+++ b/arch/sparc64/lib/VISsave.S
@@ -59,15 +59,17 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
 	be,pn		%icc, 9b
 	 add		%g6, TI_FPREGS, %g2
 	andcc		%o5, FPRS_DL, %g0
-	membar		#StoreStore | #LoadStore
 
 	be,pn		%icc, 4f
 	 add		%g6, TI_FPREGS+0x40, %g3
+	membar		#Sync
 	stda		%f0, [%g2 + %g1] ASI_BLK_P
 	stda		%f16, [%g3 + %g1] ASI_BLK_P
+	membar		#Sync
 	andcc		%o5, FPRS_DU, %g0
 	be,pn		%icc, 5f
 4:	 add		%g1, 128, %g1
+	membar		#Sync
 	stda		%f32, [%g2 + %g1] ASI_BLK_P
 
 	stda		%f48, [%g3 + %g1] ASI_BLK_P
@@ -87,7 +89,7 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
 	sll		%g1, 5, %g1
 	add		%g6, TI_FPREGS+0xc0, %g3
 	wr		%g0, FPRS_FEF, %fprs
-	membar		#StoreStore | #LoadStore
+	membar		#Sync
 	stda		%f32, [%g2 + %g1] ASI_BLK_P
 	stda		%f48, [%g3 + %g1] ASI_BLK_P
 	membar		#Sync
@@ -128,8 +130,8 @@ VISenterhalf:
 	be,pn		%icc, 4f
 	 add		%g6, TI_FPREGS, %g2
 
-	membar		#StoreStore | #LoadStore
 	add		%g6, TI_FPREGS+0x40, %g3
+	membar		#Sync
 	stda		%f0, [%g2 + %g1] ASI_BLK_P
 	stda		%f16, [%g3 + %g1] ASI_BLK_P
 	membar		#Sync
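
Note on the VISsave.S hunks above: the patch drops the #StoreStore | #LoadStore ordering barriers and instead issues membar #Sync right around the ASI_BLK_P block stores; block stores on UltraSPARC are not ordered by the ordinary membar ordering bits, so a full #Sync appears to be what guarantees they have completed before the saved FPU state is relied upon. A minimal sketch, assuming GCC inline assembly on sparc64 (the helper name is made up for illustration, it is not part of the patch):

	/* Hypothetical helper, not from the patch: emit the same barrier
	 * from C.  membar #Sync waits for every prior instruction,
	 * including block stores, to complete; the "memory" clobber
	 * stops the compiler from reordering accesses around it.
	 */
	static inline void membar_sync(void)
	{
		__asm__ __volatile__("membar #Sync" : : : "memory");
	}
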
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- * using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
-	.text
-	.align	64
-
-	/* CAS basically works like this:
-	 *
-	 * void CAS(MEM, REG1, REG2)
-	 * {
-	 *   START_ATOMIC();
-	 *   if (*(MEM) == REG1) {
-	 *     TMP = *(MEM);
-	 *     *(MEM) = REG2;
-	 *     REG2 = TMP;
-	 *   } else
-	 *     REG2 = *(MEM);
-	 *   END_ATOMIC();
-	 * }
-	 */
-
-	.globl	_atomic_dec_and_lock
-_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
-loop1:	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, start_to_zero
-	 nop
-nzero:	cas	[%o0], %g2, %g7
-	cmp	%g2, %g7
-	bne,pn	%icc, loop1
-	 mov	0, %g1
-
-out:
-	membar	#StoreLoad | #StoreStore
-	retl
-	 mov	%g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	add	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
-	ldstub	[%o1], %g3
-	membar	#StoreLoad | #StoreStore
-	brnz,pn	%g3, spin_on_lock
-	 nop
-loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
-	cmp	%g2, %g7
-
-	be,pt	%icc, out
-	 mov	1, %g1
-	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, loop2
-	 nop
-	membar	#StoreStore | #LoadStore
-	stb	%g0, [%o1]
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	sub	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-
-	b,pt	%xcc, nzero
-	 nop
-spin_on_lock:
-	ldub	[%o1], %g3
-	membar	#LoadLoad
-	brnz,pt	%g3, spin_on_lock
-	 nop
-	ba,pt	%xcc, to_zero
-	 nop
-	 nop
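
With the hand-coded sparc64 _atomic_dec_and_lock() removed, and the CONFIG_HAVE_DEC_LOCK line dropped from the Makefile above, the architecture falls back to the generic C version. A minimal sketch of that fallback, assuming the classic lib/dec_and_lock.c shape rather than quoting it verbatim:

	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	/* Sketch of the generic fallback: return 1 with the lock held
	 * if the counter dropped to zero, otherwise return 0 with the
	 * lock released.  Taking the lock before the decrement closes
	 * the race against anyone who might resurrect the counter.
	 */
	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	{
		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;	/* hit zero: caller keeps the lock */
		spin_unlock(lock);
		return 0;		/* still in use: lock dropped */
	}
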
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
index 09cbbaa0ebf4..e1264650ca7a 100644
--- a/arch/sparc64/lib/strncpy_from_user.S
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -125,15 +125,11 @@ __strncpy_from_user:
 	 add	%o2, %o3, %o0
 	.size	__strncpy_from_user, .-__strncpy_from_user
 
-	.section .fixup,#alloc,#execinstr
-	.align	4
-4:	retl
-	 mov	-EFAULT, %o0
-
 	.section __ex_table,#alloc
 	.align	4
-	.word	60b, 4b
-	.word	61b, 4b
-	.word	62b, 4b
-	.word	63b, 4b
-	.word	64b, 4b
+	.word	60b, __retl_efault
+	.word	61b, __retl_efault
+	.word	62b, __retl_efault
+	.word	63b, __retl_efault
+	.word	64b, __retl_efault
+	.previous
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
index 0278e34125db..19d1fdb17d0e 100644
--- a/arch/sparc64/lib/user_fixup.c
+++ b/arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
 
 /* Calculating the exact fault address when using
  * block loads and stores can be very complicated.
+ *
  * Instead of trying to be clever and handling all
  * of the cases, just fix things up simply here.
  */
 
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
 {
-	char *dst = to;
-	const char __user *src = from;
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long end = start + size;
 
-	while (size) {
-		if (__get_user(*dst, src))
-			break;
-		dst++;
-		src++;
-		size--;
+	if (fault_addr < start || fault_addr >= end) {
+		*offset = 0;
+	} else {
+		*offset = start - fault_addr;
+		size = end - fault_addr;
 	}
+	return size;
+}
 
-	if (size)
-		memset(dst, 0, size);
+unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+{
+	unsigned long offset;
+
+	size = compute_size((unsigned long) from, size, &offset);
+	if (likely(size))
+		memset(to + offset, 0, size);
 
 	return size;
 }
 
 unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
 {
-	char __user *dst = to;
-	const char *src = from;
-
-	while (size) {
-		if (__put_user(*src, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	unsigned long offset;
 
-	return size;
+	return compute_size((unsigned long) to, size, &offset);
 }
 
 unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
 {
-	char __user *dst = to;
-	char __user *src = from;
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long start = (unsigned long) to;
+	unsigned long end = start + size;
 
-	while (size) {
-		char tmp;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
-		if (__get_user(tmp, src))
-			break;
-		if (__put_user(tmp, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	start = (unsigned long) from;
+	end = start + size;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
 	return size;
 }
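
For context on the user_fixup.c rewrite: these routines run from the exception-table fixup path when a block copy faults, and they compute the value that copy_from_user()/copy_to_user() ultimately return, namely the number of bytes left uncopied, with the uncopied tail of the destination meant to be zero-filled on the read side. A minimal sketch of a hypothetical caller, not part of the patch, showing that contract:

	#include <asm/uaccess.h>
	#include <linux/errno.h>

	/* Hypothetical caller: copy_from_user() returns the number of
	 * bytes it could NOT copy.  On sparc64 a fault in the block-copy
	 * path lands in copy_from_user_fixup() above, which derives that
	 * count from the fault address recorded in thread_info.
	 */
	static int read_user_blob(void *kbuf, const void __user *ubuf,
				  unsigned long len)
	{
		if (copy_from_user(kbuf, ubuf, len))
			return -EFAULT;	/* faulted partway through */
		return 0;
	}
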