author		Jeff Garzik <jgarzik@pobox.com>	2005-09-24 00:25:02 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-24 00:25:02 -0400
commit		c1d9728ecc5b560465df3c0c0d3b3825c2710b40
tree		d0abb5c923a7a3eca2d4b2c3e1964bf484870909
parent		165415f700b0c77fa1f8db6198f48582639adf78
parent		87e807b6c461bbd449496a4c3ab78ab164a4ba97
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig.debug          |  8
-rw-r--r--  arch/sparc64/kernel/entry.S         | 39
-rw-r--r--  arch/sparc64/kernel/ptrace.c        |  7
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c |  3
-rw-r--r--  arch/sparc64/kernel/una_asm.S       |  2
-rw-r--r--  arch/sparc64/kernel/unaligned.c     | 64
-rw-r--r--  arch/sparc64/lib/Makefile           |  2
-rw-r--r--  arch/sparc64/lib/dec_and_lock.S     | 80
8 files changed, 81 insertions(+), 124 deletions(-)
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index cd8d39fb954d..af0e9411b83e 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,14 +33,6 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP && !DEBUG_SPINLOCK
-	default y
-
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
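
The comment deleted above gives the rationale: sparc64's hand-written atomic_dec_and_lock() is incompatible with spinlock debugging, and this merge drops it in favor of the generic C version for all configurations. As a rough sketch of that fallback's shape in this era (assuming the lib/dec_and_lock.c of the time, which had no lock-free fast path):

	/* Sketch of the generic fallback, not the sparc64 assembly being
	 * removed: take the lock, then decrement; return 1 with the lock
	 * held iff the counter reached zero.
	 */
	int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
	{
		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;
		spin_unlock(lock);
		return 0;
	}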
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c5..b48349527853 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -42,19 +42,15 @@
  * executing (see inherit_locked_prom_mappings() rant).
  */
 sparc64_vpte_nucleus:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
-	mov		0xf, %g5
-	sllx		%g5, 28, %g5
-
-	/* Is addr >= LOW_OBP_ADDRESS? */
+	/* Note that kvmap below has verified that the address is
+	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
+	 * here we need only check if it is an OBP address or not.
+	 */
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
 	cmp		%g4, %g5
 	blu,pn		%xcc, sparc64_vpte_patchme1
 	 mov		0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
 	sllx		%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS? */
 	cmp		%g4, %g5
 	blu,pn		%xcc, obp_iaddr_patch
 	 nop
@@ -156,26 +152,29 @@ obp_daddr_patch:
  * rather, use information saved during inherit_prom_mappings() using 8k
  * pagesize.
  */
+	.align		32
 kvmap:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
-	mov		0xf, %g5
-	sllx		%g5, 28, %g5
+	sethi		%hi(MODULES_VADDR), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, longpath
+	 mov		(VMALLOC_END >> 24), %g5
+	sllx		%g5, 24, %g5
+	cmp		%g4, %g5
+	bgeu,pn		%xcc, longpath
+	 nop
 
-	/* Is addr >= LOW_OBP_ADDRESS? */
+kvmap_check_obp:
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
 	cmp		%g4, %g5
-	blu,pn		%xcc, vmalloc_addr
+	blu,pn		%xcc, kvmap_vmalloc_addr
 	 mov		0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS. */
 	sllx		%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS? */
 	cmp		%g4, %g5
 	blu,pn		%xcc, obp_daddr_patch
 	 nop
 
-vmalloc_addr:
-	/* If we get here, a vmalloc addr accessed, load kernel VPTE. */
+kvmap_vmalloc_addr:
+	/* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
 	ldxa		[%g3 + %g6] ASI_N, %g5
 	brgez,pn	%g5, longpath
 	 nop
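
Read together, the two hunks split the work: kvmap now filters out anything outside MODULES_VADDR..VMALLOC_END up front, so sparc64_vpte_nucleus may assume that range and only needs to separate OBP addresses from vmalloc addresses. A sketch of the new kvmap classification in C, with illustrative names standing in for the entry.S branch targets and the asm/pgtable.h constants of this era:

	/* Illustrative only: the enum values correspond to the longpath,
	 * obp_daddr_patch, and kvmap_vmalloc_addr labels above.
	 */
	enum kvmap_class { KV_LONGPATH, KV_OBP, KV_VMALLOC };

	static enum kvmap_class kvmap_classify(unsigned long addr)
	{
		if (addr < MODULES_VADDR || addr >= VMALLOC_END)
			return KV_LONGPATH;	/* not a kernel dynamic mapping */
		if (addr >= LOW_OBP_ADDRESS && addr < HI_OBP_ADDRESS)
			return KV_OBP;		/* firmware (OBP) window */
		return KV_VMALLOC;		/* load the kernel VPTE */
	}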
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 23ad839d113f..5efbff90d668 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -30,6 +30,7 @@
 #include <asm/psrcompat.h>
 #include <asm/visasm.h>
 #include <asm/spitfire.h>
+#include <asm/page.h>
 
 /* Returning from ptrace is a bit tricky because the syscall return
  * low level code assumes any value returned which is negative and
@@ -128,20 +129,20 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	 * is mapped to in the user's address space, we can skip the
 	 * D-cache flush.
 	 */
-	if ((uaddr ^ kaddr) & (1UL << 13)) {
+	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
 		unsigned long start = __pa(kaddr);
 		unsigned long end = start + len;
 
 		if (tlb_type == spitfire) {
 			for (; start < end; start += 32)
-				spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
 		} else {
 			for (; start < end; start += 32)
 				__asm__ __volatile__(
 					"stxa %%g0, [%0] %1\n\t"
 					"membar #Sync"
 					: /* no outputs */
-					: "r" (va),
+					: "r" (start),
 					  "i" (ASI_DCACHE_INVALIDATE));
 		}
 	}
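
Both fixed lines referenced a stale variable va; the loop actually advances start, the physical address derived from kaddr. The bit-13 test itself is a cache-color check: assuming 8 KB pages and a 16 KB direct-mapped, virtually indexed D-cache (as on UltraSPARC I/II), bit 13 is the only index bit above the page offset, so two mappings of the same page can leave stale lines only when they disagree there. A minimal sketch under those geometry assumptions:

	/* The flush is needed exactly when the user and kernel mappings
	 * land in different cache colors, i.e. differ in virtual bit 13.
	 */
	static int dcache_colors_differ(unsigned long uaddr, void *kaddr)
	{
		return ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) != 0;
	}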
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index cbb5e59824e5..fb7a5370dbfc 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
 EXPORT_SYMBOL(atomic64_add_ret);
 EXPORT_SYMBOL(atomic64_sub);
 EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
 
 /* Atomic bit operations. */
 EXPORT_SYMBOL(test_and_set_bit);
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index cbb40585253c..da48400bcc95 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -17,7 +17,7 @@ kernel_unaligned_trap_fault:
 __do_int_store:
 	rd	%asi, %o4
 	wr	%o3, 0, %asi
-	ldx	[%o2], %g3
+	mov	%o2, %g3
 	cmp	%o1, 2
 	be,pn	%icc, 2f
 	 cmp	%o1, 4
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index da9739f0d437..42718f6a7d36 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -184,13 +184,14 @@ extern void do_int_load(unsigned long *dest_reg, int size,
 			unsigned long *saddr, int is_signed, int asi);
 
 extern void __do_int_store(unsigned long *dst_addr, int size,
-			   unsigned long *src_val, int asi);
+			   unsigned long src_val, int asi);
 
 static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
-				struct pt_regs *regs, int asi)
+				struct pt_regs *regs, int asi, int orig_asi)
 {
 	unsigned long zero = 0;
-	unsigned long *src_val = &zero;
+	unsigned long *src_val_p = &zero;
+	unsigned long src_val;
 
 	if (size == 16) {
 		size = 8;
@@ -198,7 +199,25 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
 		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
 			(unsigned)fetch_reg(reg_num + 1, regs);
 	} else if (reg_num) {
-		src_val = fetch_reg_addr(reg_num, regs);
+		src_val_p = fetch_reg_addr(reg_num, regs);
+	}
+	src_val = *src_val_p;
+	if (unlikely(asi != orig_asi)) {
+		switch (size) {
+		case 2:
+			src_val = swab16(src_val);
+			break;
+		case 4:
+			src_val = swab32(src_val);
+			break;
+		case 8:
+			src_val = swab64(src_val);
+			break;
+		case 16:
+		default:
+			BUG();
+			break;
+		};
 	}
 	__do_int_store(dst_addr, size, src_val, asi);
 }
@@ -276,6 +295,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
 		kernel_mna_trap_fault();
 	} else {
 		unsigned long addr;
+		int orig_asi, asi;
 
 		addr = compute_effective_address(regs, insn,
 						 ((insn >> 25) & 0x1f));
@@ -285,18 +305,48 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
 		       regs->tpc, dirstrings[dir], addr, size,
 		       regs->u_regs[UREG_RETPC]);
 #endif
+		orig_asi = asi = decode_asi(insn, regs);
+		switch (asi) {
+		case ASI_NL:
+		case ASI_AIUPL:
+		case ASI_AIUSL:
+		case ASI_PL:
+		case ASI_SL:
+		case ASI_PNFL:
+		case ASI_SNFL:
+			asi &= ~0x08;
+			break;
+		};
 		switch (dir) {
 		case load:
 			do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
 				    size, (unsigned long *) addr,
-				    decode_signedness(insn),
-				    decode_asi(insn, regs));
+				    decode_signedness(insn), asi);
+			if (unlikely(asi != orig_asi)) {
+				unsigned long val_in = *(unsigned long *) addr;
+				switch (size) {
+				case 2:
+					val_in = swab16(val_in);
+					break;
+				case 4:
+					val_in = swab32(val_in);
+					break;
+				case 8:
+					val_in = swab64(val_in);
+					break;
+				case 16:
+				default:
+					BUG();
+					break;
+				};
+				*(unsigned long *) addr = val_in;
+			}
 			break;
 
 		case store:
 			do_int_store(((insn>>25)&0x1f), size,
 				     (unsigned long *) addr, regs,
-				     decode_asi(insn, regs));
+				     asi, orig_asi);
 			break;
 
 		default:
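
The new ASI switch normalizes little-endian accesses: on SPARC V9 each little-endian ASI is its big-endian counterpart with bit 3 set (ASI_P is 0x80, ASI_PL is 0x88, and so on), so the handler clears 0x08, performs the access big-endian, and byte-swaps the value to recover the little-endian result. That is also why __do_int_store now takes the (possibly already swapped) value directly, matching the ldx-to-mov change in una_asm.S above. A minimal sketch of the swap step, mirroring the switch statements added here:

	/* Hedged sketch; swab16/32/64 are the kernel's byteswap helpers. */
	static unsigned long le_asi_fixup(unsigned long val, int size)
	{
		switch (size) {
		case 2:	 return swab16(val);
		case 4:	 return swab32(val);
		case 8:	 return swab64(val);
		default: BUG();		/* the 128-bit case is rejected */
		}
		return val;
	}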
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- * using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
-	.text
-	.align	64
-
-	/* CAS basically works like this:
-	 *
-	 * void CAS(MEM, REG1, REG2)
-	 * {
-	 *   START_ATOMIC();
-	 *   if (*(MEM) == REG1) {
-	 *     TMP = *(MEM);
-	 *     *(MEM) = REG2;
-	 *     REG2 = TMP;
-	 *   } else
-	 *     REG2 = *(MEM);
-	 *   END_ATOMIC();
-	 * }
-	 */
-
-	.globl	_atomic_dec_and_lock
-_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
-loop1:	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, start_to_zero
-	 nop
-nzero:	cas	[%o0], %g2, %g7
-	cmp	%g2, %g7
-	bne,pn	%icc, loop1
-	 mov	0, %g1
-
-out:
-	membar	#StoreLoad | #StoreStore
-	retl
-	 mov	%g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	add	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
-	ldstub	[%o1], %g3
-	membar	#StoreLoad | #StoreStore
-	brnz,pn	%g3, spin_on_lock
-	 nop
-loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
-	cmp	%g2, %g7
-
-	be,pt	%icc, out
-	 mov	1, %g1
-	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, loop2
-	 nop
-	membar	#StoreStore | #LoadStore
-	stb	%g0, [%o1]
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	sub	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-
-	b,pt	%xcc, nzero
-	 nop
-spin_on_lock:
-	ldub	[%o1], %g3
-	membar	#LoadLoad
-	brnz,pt	%g3, spin_on_lock
-	 nop
-	ba,pt	%xcc, to_zero
-	 nop
-	nop
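
For reference, what the deleted assembly implemented: a CAS loop that decrements without touching the lock unless the count would reach zero, taking the spinlock (via ldstub) only for the final 1-to-0 transition. A loose C rendering of that shape, using the modern atomic_cmpxchg() helper purely for clarity (no such helper existed in this era, which is part of why this was assembly; the preempt-count bookkeeping is omitted):

	int atomic_dec_and_lock_sketch(atomic_t *counter, spinlock_t *lock)
	{
		int old;

		while ((old = atomic_read(counter)) != 1) {
			/* Fast path: decrement in place, never take the lock. */
			if (atomic_cmpxchg(counter, old, old - 1) == old)
				return 0;
		}
		/* Slow path: the 1 -> 0 transition must happen under the lock. */
		spin_lock(lock);
		if (atomic_dec_and_test(counter))
			return 1;	/* caller now holds the lock */
		spin_unlock(lock);
		return 0;
	}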