author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-10-31 11:08:35 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-11-27 18:53:47 -0500
commit    063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
tree      eb2a2c1faa732c763102040478830111fc13f2a5 /arch/arm/mm/copypage-v6.c
parent    d73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is not only
inefficient, it also causes additional complexity for highmem support, since
we convert from a struct page to a kernel direct mapped address and back to a
struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap entries
for pages which we're going to remap.

So, push the kmapping down into the copypage implementation files where it's
required.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
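As a rough sketch of the indirection this removes (the wrapper names
old_copy_user_highpage/new_copy_user_highpage are made up for illustration;
the kmap_atomic()/kunmap_atomic() calls assume the KM_* slot API of this era,
and cpu_user.cpu_copy_user_highpage is the per-CPU hook this series adds):

    #include <linux/highmem.h>
    #include <asm/page.h>		/* copy_user_page(), cpu_user on ARM */

    /*
     * Old scheme (sketch): the generic layer kmaps both pages, so the
     * per-CPU copy_user_page() hook only ever sees kernel virtual
     * addresses - even when the implementation is going to set up its
     * own remapping of those pages anyway.
     */
    static void old_copy_user_highpage(struct page *to, struct page *from,
    		unsigned long vaddr)
    {
    	void *kto = kmap_atomic(to, KM_USER0);
    	void *kfrom = kmap_atomic(from, KM_USER1);

    	copy_user_page(kto, kfrom, vaddr, to);	/* per-CPU hook */

    	kunmap_atomic(kfrom, KM_USER1);
    	kunmap_atomic(kto, KM_USER0);
    }

    /*
     * New scheme (sketch): the struct pages are handed straight to the
     * per-CPU hook, which kmaps them only if it actually needs the
     * kernel's linear mapping (see the non-aliasing case below).
     */
    static void new_copy_user_highpage(struct page *to, struct page *from,
    		unsigned long vaddr)
    {
    	cpu_user.cpu_copy_user_highpage(to, from, vaddr);
    }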
Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--  arch/arm/mm/copypage-v6.c  |  61
1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c0767580..2ea75d0f5048 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
@@ -33,9 +33,16 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from, KM_USER0);
+	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kfrom, KM_USER0);
 }
 
 /*
@@ -48,26 +55,32 @@ static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
  */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
 	   :
 	   : "r" (kto),
 	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
 	   : "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
 
 	/*
 	 * Now copy the page using the same cache colour as the
@@ -75,16 +88,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to   = to_address + (offset << PAGE_SHIFT);
+	kfrom = from_address + (offset << PAGE_SHIFT);
+	kto   = to_address + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	flush_tlb_kernel_page(kfrom);
+	flush_tlb_kernel_page(kto);
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
 	spin_unlock(&v6_lock);
 }
@@ -124,14 +137,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v6_user_fns __initdata = {
 	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
 		cpu_user.cpu_clear_user_page	= v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page	= v6_copy_user_page_aliasing;
+		cpu_user.cpu_copy_user_highpage	= v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;