author    Paul Mundt <lethal@linux-sh.org>    2007-11-05 02:18:16 -0500
committer Paul Mundt <lethal@linux-sh.org>    2007-11-06 21:14:12 -0500
commit    ba1789efea81acc6633f427bfeb871fd608965b5 (patch)
tree      a93f739cc4d36ebf762ce38a8f90b2ffceecb0b9
parent    7747b9a493a197cb4db44c98d25ce6d3d9f586d1 (diff)
sh: Optimized copy_{to,from}_user_page() for SH-4.
This moves copy_{to,from}_user_page() out-of-line on SH-4 and converts
them to the kmap_coherent() API. Based on the MIPS implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
 arch/sh/mm/pg-sh4.c         | 52 +++++++++++++-----------
 include/asm-sh/cacheflush.h | 18 ++++++---
 2 files changed, 43 insertions(+), 27 deletions(-)
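For context on the alias test the patched clear_user_page() relies on: the SH-4 data cache is virtually indexed, so two virtual mappings of the same physical page only share cache lines when their index bits above PAGE_SHIFT agree, and CACHE_ALIAS masks exactly those bits. A minimal standalone sketch of that test follows; the CACHE_ALIAS value and the sample addresses are assumptions for illustration (16KB-per-way cache, 4KB pages), not taken from the kernel headers.

```c
/*
 * Illustrative sketch of virtual cache-alias detection; not part of
 * the patch. CACHE_ALIAS value assumed for a 16KB way with 4KB pages.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define CACHE_ALIAS	0x3000UL	/* assumed: index bits 13:12 */

/* Same shape as the test in the patched clear_user_page(). */
static int aliases(unsigned long uaddr, unsigned long kaddr)
{
	return (((uaddr & PAGE_MASK) ^ kaddr) & CACHE_ALIAS) != 0;
}

int main(void)
{
	/* Same index bits: the kernel-side write already landed in the
	 * cache lines the user mapping will hit; no writeback needed. */
	printf("%d\n", aliases(0x10001000UL, 0x80001000UL));	/* 0 */

	/* Different index bits: the user mapping would miss those lines,
	 * which is when the patch calls __flush_wback_region(). */
	printf("%d\n", aliases(0x10002000UL, 0x80001000UL));	/* 1 */
	return 0;
}
```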
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index ede6dd1e3701..8c7a9ca79879 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -52,33 +52,39 @@ static inline void kunmap_coherent(struct page *page)
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		clear_page(to);
-	else {
-		void *vto = kmap_coherent(page, address);
-		__clear_user_page(vto, to);
-		kunmap_coherent(vto);
-	}
+
+	clear_page(to);
+	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+		__flush_wback_region(to, PAGE_SIZE);
 }
 
-/*
- * copy_user_page
- * @to: P1 address
- * @from: P1 address
- * @address: U0 address to be mapped
- * @page: page (virt_to_page(to))
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
 {
+	void *vto;
+
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		copy_page(to, from);
-	else {
-		void *vfrom = kmap_coherent(page, address);
-		__copy_user_page(vfrom, from, to);
-		kunmap_coherent(vfrom);
-	}
+
+	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(vto, src, len);
+	kunmap_coherent(vto);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+	void *vfrom;
+
+	__set_bit(PG_mapped, &page->flags);
+
+	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(dst, vfrom, len);
+	kunmap_coherent(vfrom);
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
diff --git a/include/asm-sh/cacheflush.h b/include/asm-sh/cacheflush.h
index aa558da08471..b91246153b7e 100644
--- a/include/asm-sh/cacheflush.h
+++ b/include/asm-sh/cacheflush.h
@@ -43,21 +43,31 @@ extern void __flush_purge_region(void *start, int size);
 extern void __flush_invalidate_region(void *start, int size);
 #endif
 
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
+#ifdef CONFIG_CPU_SH4
+extern void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+extern void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+#else
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
 	do {							\
 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
 	do {							\
 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
+#endif
+
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
 
 #define HAVE_ARCH_UNMAPPED_AREA
 
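A note on the VM_EXEC test in the new copy_to_user_page(): this interface is what the kernel uses when it writes into another task's pages on that task's behalf, ptrace breakpoint insertion being the classic case, so executable mappings additionally need the instruction cache made coherent. A rough sketch of the generic caller pattern follows; it is simplified from the access_process_vm()-style write path, and the helper name is hypothetical, not from this patch.

```c
/*
 * Rough sketch of the generic caller pattern for copy_to_user_page();
 * the helper name is hypothetical and this is not code from the patch.
 */
static void poke_task_page(struct vm_area_struct *vma, struct page *page,
			   unsigned long vaddr, const void *buf,
			   unsigned long len)
{
	void *maddr = kmap(page);	/* kernel-side mapping of the page */

	/*
	 * copy_to_user_page() performs the copy and keeps the D-cache
	 * (and, for VM_EXEC mappings, the I-cache) coherent with the
	 * user's view of the page.
	 */
	copy_to_user_page(vma, page, vaddr,
			  maddr + (vaddr & ~PAGE_MASK), buf, len);
	kunmap(page);
}
```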