diff options
author | Paul Mundt <lethal@linux-sh.org> | 2009-07-27 07:53:22 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2009-07-27 07:53:22 -0400 |
commit | dfff0fa65ab15db45acd64b3189787d37ab163cd (patch) | |
tree | c888641a25f83fb75a4886f6c1e63c44d889fed4 /arch/sh/mm/pg-sh4.c | |
parent | 2277ab4a1df50e05bc732fe9488d4e902bb8399a (diff) |
sh: wire up clear_user_highpage() for sh4, convert sh7705.
This wires up clear_user_highpage() on SH-4 and subsequently converts the
SH7705 32kB cache mode over to using it. Now that the SH-4 implementation
handles all of the dcache purging directly in the aliasing case, there is
no need to do this in the default clear_page() implementation.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/pg-sh4.c')
-rw-r--r-- | arch/sh/mm/pg-sh4.c | 29 |
1 file changed, 14 insertions, 15 deletions
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index f3c4b2a54fc7..4d93070b8220 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/pg-sh4.c | 2 | * arch/sh/mm/pg-sh4.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2002 - 2007 Paul Mundt | 5 | * Copyright (C) 2002 - 2009 Paul Mundt |
6 | * | 6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | 7 | * Released under the terms of the GNU GPL v2.0. |
8 | */ | 8 | */ |
@@ -58,20 +58,6 @@ static inline void kunmap_coherent(struct page *page) | |||
58 | preempt_check_resched(); | 58 | preempt_check_resched(); |
59 | } | 59 | } |
60 | 60 | ||
61 | /* | ||
62 | * clear_user_page | ||
63 | * @to: P1 address | ||
64 | * @address: U0 address to be mapped | ||
65 | * @page: page (virt_to_page(to)) | ||
66 | */ | ||
67 | void clear_user_page(void *to, unsigned long address, struct page *page) | ||
68 | { | ||
69 | clear_page(to); | ||
70 | |||
71 | if (pages_do_alias((unsigned long)to, address & PAGE_MASK)) | ||
72 | __flush_wback_region(to, PAGE_SIZE); | ||
73 | } | ||
74 | |||
75 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | 61 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, |
76 | unsigned long vaddr, void *dst, const void *src, | 62 | unsigned long vaddr, void *dst, const void *src, |
77 | unsigned long len) | 63 | unsigned long len) |
@@ -128,3 +114,16 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
128 | smp_wmb(); | 114 | smp_wmb(); |
129 | } | 115 | } |
130 | EXPORT_SYMBOL(copy_user_highpage); | 116 | EXPORT_SYMBOL(copy_user_highpage); |
117 | |||
118 | void clear_user_highpage(struct page *page, unsigned long vaddr) | ||
119 | { | ||
120 | void *kaddr = kmap_atomic(page, KM_USER0); | ||
121 | |||
122 | clear_page(kaddr); | ||
123 | |||
124 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) | ||
125 | __flush_wback_region(kaddr, PAGE_SIZE); | ||
126 | |||
127 | kunmap_atomic(kaddr, KM_USER0); | ||
128 | } | ||
129 | EXPORT_SYMBOL(clear_user_highpage); | ||