author		David S. Miller <davem@sunset.davemloft.net>	2005-09-29 21:50:34 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-09-29 21:50:34 -0400
commit		717463d806a53380a691eeb0136a4b750a9f9ae0 (patch)
tree		a9c76e59662916d133362a5266bb61dc2586344b /arch/sparc64
parent		4cb29d18129fb425c6202ab535c3fc1856391b99 (diff)
[SPARC64]: Fix several bugs in flush_ptrace_access().
1) Use cpudata cache line sizes, not magic constants.

2) Align start address in cheetah case so we do not get
   unaligned address traps.  (pgrep was good at triggering
   this, via /proc/${pid}/cmdline accesses)

Signed-off-by: David S. Miller <davem@davemloft.net>
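For readers skimming the diff below, a minimal, self-contained sketch of the idea behind both fixes; it is not the kernel code itself.  flush_range_demo(), flush_line() and the line_size parameter are hypothetical stand-ins for the real flush loops and for local_cpu_data().dcache_line_size:

#include <assert.h>

/* Stand-in for the real per-line flush primitive (spitfire_put_dcache_tag()
 * or the cheetah stxa/ASI_DCACHE_INVALIDATE sequence in the patch below). */
static void flush_line(unsigned long addr)
{
	(void) addr;		/* no-op in this sketch */
}

/* Hypothetical illustration of the patched loops: step by the CPU's
 * reported D-cache line size instead of the old hard-coded 32, and
 * round the start address down to a line boundary first, which is
 * what avoids the unaligned-address trap mentioned above. */
static void flush_range_demo(unsigned long start, unsigned long end,
			     unsigned long line_size)
{
	/* line_size stands in for local_cpu_data().dcache_line_size;
	 * the mask below assumes it is a power of two. */
	assert(line_size && (line_size & (line_size - 1)) == 0);

	start &= ~(line_size - 1);	/* align down to a cache line */
	for (; start < end; start += line_size)
		flush_line(start);
}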
Diffstat (limited to 'arch/sparc64')
-rw-r--r--	arch/sparc64/kernel/ptrace.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 5efbff90d66..774ecbb8a03 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -31,6 +31,7 @@
 #include <asm/visasm.h>
 #include <asm/spitfire.h>
 #include <asm/page.h>
+#include <asm/cpudata.h>
 
 /* Returning from ptrace is a bit tricky because the syscall return
  * low level code assumes any value returned which is negative and
@@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
 		unsigned long start = __pa(kaddr);
 		unsigned long end = start + len;
+		unsigned long dcache_line_size;
+
+		dcache_line_size = local_cpu_data().dcache_line_size;
 
 		if (tlb_type == spitfire) {
-			for (; start < end; start += 32)
+			for (; start < end; start += dcache_line_size)
 				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
 		} else {
-			for (; start < end; start += 32)
+			start &= ~(dcache_line_size - 1);
+			for (; start < end; start += dcache_line_size)
 				__asm__ __volatile__(
 					"stxa %%g0, [%0] %1\n\t"
 					"membar #Sync"
@@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if (write && tlb_type == spitfire) {
 		unsigned long start = (unsigned long) kaddr;
 		unsigned long end = start + len;
+		unsigned long icache_line_size;
+
+		icache_line_size = local_cpu_data().icache_line_size;
 
-		for (; start < end; start += 32)
+		for (; start < end; start += icache_line_size)
 			flushi(start);
 	}
 }
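As a side note, not part of the commit: the guard at the top of the second hunk, if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)), is unchanged here.  It appears to test whether the user mapping and the kernel mapping of the page differ in bit 13 and therefore index different lines of the virtually indexed D-cache, so a manual flush is only needed in that aliasing case.  A hypothetical helper spelling out that check, assuming the sparc64 8KB base page size:

#include <stdbool.h>

#define PAGE_SHIFT	13	/* sparc64 uses 8KB base pages */

/* Hypothetical helper mirroring the guard above: with a virtually
 * indexed D-cache larger than one page, two mappings of the same page
 * land in different cache "colors" when they differ in bit 13; only
 * then does the D-cache need to be flushed by hand. */
static bool dcache_aliases(unsigned long uaddr, unsigned long kaddr)
{
	return ((uaddr ^ kaddr) & (1UL << PAGE_SHIFT)) != 0;
}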