author		David S. Miller <davem@davemloft.net>	2005-04-17 21:03:11 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-17 21:03:11 -0400
commit		dadeafdfc8da8c27e5a68e0706b9856eaac89391
tree		17993d26e93e598a2f449063fe213afad2a45814
parent		fb65b9619b756793d824df7501c895a2c2871f40
[PATCH] sparc64: Reduce ptrace cache flushing
We were flushing the D-cache excessively for ptrace() processing and this
makes debugging threads so slow as to be totally unusable.

All process page accesses via ptrace() go via access_process_vm().  This
routine, for each process page, uses get_user_pages().  That in turn does
a flush_dcache_page() on the child pages before we copy in/out the ptrace
request data.

Therefore, all we need to do after the data movement is:

1) Flush the D-cache pages if the kernel maps the page to a different
   color than userspace does.

2) If we wrote to the page, we need to flush the I-cache on older cpus.

Previously we just flushed the entire cache at the end of a ptrace()
request, and that was beyond stupid.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
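To make the aliasing test concrete: the pre-Cheetah D-cache is 16K, direct mapped, with 32-byte lines and is virtually indexed, so with 8K pages only address bit 13 of a mapping decides which half of the cache a given line of the page lands in (its "color"). The sketch below is a self-contained userspace illustration of that test, not kernel code; needs_dcache_flush() and lines_to_flush() are invented names for this example, and the real flush loop in the patch walks physical addresses obtained via __pa() and writes D-cache tags rather than counting lines.

#include <stdio.h>

#define DCACHE_LINE_SIZE	32UL		/* spitfire D-cache line size */
#define DCACHE_COLOR_BIT	(1UL << 13)	/* only index bit above the 8K page offset */

/* Hypothetical helper: a flush is only needed when the kernel and user
 * mappings of the page disagree in bit 13, i.e. have different colors.
 */
static int needs_dcache_flush(unsigned long uaddr, unsigned long kaddr)
{
	return ((uaddr ^ kaddr) & DCACHE_COLOR_BIT) != 0;
}

/* Hypothetical stand-in for the flush loop: count the 32-byte lines the
 * kernel would walk over the accessed range (the real code invalidates
 * each line's tag, which cannot be done from userspace).
 */
static unsigned long lines_to_flush(unsigned long kaddr, unsigned long len)
{
	unsigned long start = kaddr & ~(DCACHE_LINE_SIZE - 1);
	unsigned long end = kaddr + len;
	unsigned long n = 0;

	for (; start < end; start += DCACHE_LINE_SIZE)
		n++;
	return n;
}

int main(void)
{
	unsigned long uaddr       = 0x0000000070000000UL; /* user mapping, bit 13 clear */
	unsigned long kaddr_same  = 0xfffff80000480000UL; /* kernel alias, bit 13 clear */
	unsigned long kaddr_alias = 0xfffff80000482000UL; /* kernel alias, bit 13 set   */

	printf("same color : flush needed = %d\n",
	       needs_dcache_flush(uaddr, kaddr_same));
	printf("alias color: flush needed = %d, lines for 512 bytes = %lu\n",
	       needs_dcache_flush(uaddr, kaddr_alias),
	       lines_to_flush(kaddr_alias, 512));
	return 0;
}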
-rw-r--r--	arch/sparc64/kernel/ptrace.c		82
-rw-r--r--	include/asm-sparc64/cacheflush.h	22
2 files changed, 69 insertions(+), 35 deletions(-)
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 08bac537262a..5f080cf04b33 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -103,6 +103,55 @@ void ptrace_disable(struct task_struct *child)
 	/* nothing to do */
 }
 
+/* To get the necessary page struct, access_process_vm() first calls
+ * get_user_pages().  This has done a flush_dcache_page() on the
+ * accessed page.  Then our caller (copy_{to,from}_user_page()) did
+ * a memcpy to read/write the data from that page.
+ *
+ * Now, the only thing we have to do is:
+ * 1) flush the D-cache if it's possible that an illegal alias
+ *    has been created
+ * 2) flush the I-cache if this is pre-cheetah and we did a write
+ */
+void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+			 unsigned long uaddr, void *kaddr,
+			 unsigned long len, int write)
+{
+	BUG_ON(len > PAGE_SIZE);
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	/* If bit 13 of the kernel address we used to access the
+	 * user page is the same as the virtual address that page
+	 * is mapped to in the user's address space, we can skip the
+	 * D-cache flush.
+	 */
+	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
+		unsigned long start = __pa(kaddr);
+		unsigned long end = start + len;
+
+		if (tlb_type == spitfire) {
+			for (; start < end; start += 32)
+				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
+		} else {
+			for (; start < end; start += 32)
+				__asm__ __volatile__(
+					"stxa %%g0, [%0] %1\n\t"
+					"membar #Sync"
+					: /* no outputs */
+					: "r" (start),
+					  "i" (ASI_DCACHE_INVALIDATE));
+		}
+	}
+#endif
+	if (write && tlb_type == spitfire) {
+		unsigned long start = (unsigned long) kaddr;
+		unsigned long end = start + len;
+
+		for (; start < end; start += 32)
+			flushi(start);
+	}
+}
+
 asmlinkage void do_ptrace(struct pt_regs *regs)
 {
 	int request = regs->u_regs[UREG_I0];
@@ -227,7 +276,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 			pt_error_return(regs, -res);
 		else
 			pt_os_succ_return(regs, tmp64, (void __user *) data);
-		goto flush_and_out;
+		goto out_tsk;
 	}
 
 	case PTRACE_POKETEXT: /* write the word at location addr. */
@@ -253,7 +302,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 			pt_error_return(regs, -res);
 		else
 			pt_succ_return(regs, res);
-		goto flush_and_out;
+		goto out_tsk;
 	}
 
 	case PTRACE_GETREGS: {
@@ -485,12 +534,12 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 				      (char __user *)addr2, data);
 		if (res == data) {
 			pt_succ_return(regs, 0);
-			goto flush_and_out;
+			goto out_tsk;
 		}
 		if (res >= 0)
 			res = -EIO;
 		pt_error_return(regs, -res);
-		goto flush_and_out;
+		goto out_tsk;
 	}
 
 	case PTRACE_WRITETEXT:
@@ -499,12 +548,12 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 				       addr, data);
 		if (res == data) {
 			pt_succ_return(regs, 0);
-			goto flush_and_out;
+			goto out_tsk;
 		}
 		if (res >= 0)
 			res = -EIO;
 		pt_error_return(regs, -res);
-		goto flush_and_out;
+		goto out_tsk;
 	}
 	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
 		addr = 1;
@@ -571,27 +620,6 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 		goto out_tsk;
 	}
 	}
-flush_and_out:
-	{
-		unsigned long va;
-
-		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-			for (va = 0; va < (1 << 16); va += (1 << 5))
-				spitfire_put_dcache_tag(va, 0x0);
-			/* No need to mess with I-cache on Cheetah. */
-		} else {
-			for (va = 0; va < L1DCACHE_SIZE; va += 32)
-				spitfire_put_dcache_tag(va, 0x0);
-			if (request == PTRACE_PEEKTEXT ||
-			    request == PTRACE_POKETEXT ||
-			    request == PTRACE_READTEXT ||
-			    request == PTRACE_WRITETEXT) {
-				for (va = 0; va < (PAGE_SIZE << 1); va += 32)
-					spitfire_put_icache_tag(va, 0x0);
-				__asm__ __volatile__("flush %g6");
-			}
-		}
-	}
 out_tsk:
 	if (child)
 		put_task_struct(child);
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index 86f02937ff1b..51b26e81d828 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -49,16 +49,22 @@ extern void flush_dcache_page(struct page *page);
 #define flush_icache_page(vma, pg)	do { } while(0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len); \
+extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len, int write);
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+	do {								\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
+		memcpy(dst, src, len);					\
+		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
 	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
 		memcpy(dst, src, len); \
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
 	} while (0)
 
 #define flush_dcache_mmap_lock(mapping)      do { } while (0)
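As a usage note, the two macros above can be exercised outside the kernel with stub flush primitives. The harness below is purely illustrative: struct vm_area_struct, struct page, flush_cache_page(), flush_ptrace_access() and page_to_pfn() are reduced to printing stand-ins so the example compiles on its own, while the macro bodies are copied from the hunk above. It only shows which buffer pointer and write flag each macro hands to the new hook.

#include <stdio.h>
#include <string.h>

struct vm_area_struct { int dummy; };	/* stub, not the kernel type */
struct page { int dummy; };		/* stub, not the kernel type */

static unsigned long page_to_pfn(struct page *page)
{
	(void)page;
	return 0x1234;	/* arbitrary stub pfn */
}

static void flush_cache_page(struct vm_area_struct *vma, unsigned long vaddr,
			     unsigned long pfn)
{
	(void)vma;
	printf("flush_cache_page(vaddr=%#lx, pfn=%#lx)\n", vaddr, pfn);
}

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write)
{
	(void)vma; (void)page;
	printf("flush_ptrace_access(uaddr=%#lx, kaddr=%p, len=%lu, write=%d)\n",
	       uaddr, kaddr, len, write);
}

/* Macro bodies exactly as introduced by the patch. */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
	} while (0)

int main(void)
{
	struct vm_area_struct vma;
	struct page pg;
	char user_page[64];			/* stands in for the kernel mapping of the child page */
	char buf[16] = "poke/peek data";	/* stands in for the tracer's buffer */
	unsigned long uaddr = 0x70002000UL;	/* where the child maps the page */

	/* ptrace write path: tracer buffer -> child page, then the hook. */
	copy_to_user_page(&vma, &pg, uaddr, user_page, buf, sizeof(buf));
	/* ptrace read path: child page -> tracer buffer, then the hook. */
	copy_from_user_page(&vma, &pg, uaddr, buf, user_page, sizeof(buf));
	return 0;
}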