author		David S. Miller <davem@davemloft.net>	2005-04-17 21:03:11 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-17 21:03:11 -0400
commit		dadeafdfc8da8c27e5a68e0706b9856eaac89391 (patch)
tree		17993d26e93e598a2f449063fe213afad2a45814 /include/asm-sparc64
parent		fb65b9619b756793d824df7501c895a2c2871f40 (diff)
[PATCH] sparc64: Reduce ptrace cache flushing
We were flushing the D-cache excessively for ptrace() processing,
and this made debugging threads so slow as to be totally unusable.
All ptrace() accesses to process pages go through access_process_vm().
For each page, that routine uses get_user_pages(), which in turn does a
flush_dcache_page() on the child page before we copy the ptrace request
data in or out.
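To make the call path concrete, here is a condensed sketch (in C) of the
single-page access_process_vm() data path, showing where copy_to_user_page()
and copy_from_user_page() run for a ptrace() request. Locking, bounds
handling and error paths are omitted, and details such as kmap() and
page_cache_release() follow the 2.6-era kernel from memory, so treat this
as illustrative rather than verbatim:

	#include <linux/mm.h>
	#include <linux/highmem.h>
	#include <linux/pagemap.h>

	/* Simplified single-page version of the ptrace data path. */
	static int ptrace_access_one_page(struct task_struct *tsk,
					  struct mm_struct *mm,
					  unsigned long addr, void *buf,
					  int len, int write)
	{
		struct vm_area_struct *vma;
		struct page *page;
		void *maddr;
		int offset = addr & (PAGE_SIZE - 1);
		int bytes = min(len, (int)(PAGE_SIZE - offset));

		/* get_user_pages() already flush_dcache_page()s the page. */
		if (get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma) <= 0)
			return 0;

		maddr = kmap(page);
		if (write) {
			/* PTRACE_POKE*: write request data into the child page. */
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			/* PTRACE_PEEK*: read child page data back to the tracer. */
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		return bytes;
	}

With the patch below, the flush work after the memcpy() is done by the
copy macros themselves instead of a blanket cache flush at the end of
the ptrace() request.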
Therefore, all we need to do after the data movement is the following
(sketched in code below):
1) Flush the D-cache pages if the kernel maps the page to a different
color than userspace does.
2) If we wrote to the page, we need to flush the I-cache on older cpus.
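A hedged sketch of what the new flush_ptrace_access() hook has to do,
combining the two cases above. The per-line helpers flush_dcache_line_of()
and flush_icache_line_of() are hypothetical placeholders for the
cpu-specific flush primitives, and the single color bit (bit 13) reflects
8K pages with a 16K virtually-indexed D-cache on the original UltraSPARC;
this is an illustration, not the actual sparc64 implementation:

	#include <linux/mm.h>
	#include <linux/cache.h>

	/* Hypothetical per-line flush primitives; NOT existing kernel helpers. */
	extern void flush_dcache_line_of(unsigned long addr);
	extern void flush_icache_line_of(unsigned long addr);

	void flush_ptrace_access_sketch(struct vm_area_struct *vma,
					struct page *page,
					unsigned long uaddr, void *kaddr,
					unsigned long len, int write)
	{
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long addr;

		/* Case 1: the kernel alias used for the copy has a different
		 * D-cache color than the user mapping.  With one color bit,
		 * an XOR of the two virtual addresses detects the mismatch. */
		if ((start ^ uaddr) & (1UL << PAGE_SHIFT)) {
			for (addr = start; addr < end; addr += L1_CACHE_BYTES)
				flush_dcache_line_of(addr);
		}

		/* Case 2: we wrote into the page, so cpus without a coherent
		 * I-cache must flush the affected I-cache lines before the
		 * child executes the modified code. */
		if (write) {
			for (addr = start; addr < end; addr += L1_CACHE_BYTES)
				flush_icache_line_of(addr);
		}
	}

Because len never exceeds a page here, a handful of line flushes replaces
the whole-cache flush that the old code did on every ptrace() request.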
Previously we just flushed the entire cache at the end of a ptrace()
request, and that was beyond stupid.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--	include/asm-sparc64/cacheflush.h	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index 86f02937ff1b..51b26e81d828 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -49,16 +49,22 @@ extern void flush_dcache_page(struct page *page);
 #define flush_icache_page(vma, pg)			do { } while(0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len); \
+extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
+				unsigned long uaddr, void *kaddr,
+				unsigned long len, int write);
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+	do {								\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
+		memcpy(dst, src, len);					\
+		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
 	} while (0)
 
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		flush_cache_page(vma, vaddr, page_to_pfn(page));\
-		memcpy(dst, src, len); \
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
+	do {								\
+		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
+		memcpy(dst, src, len);					\
+		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
 	} while (0)
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)