path: root/arch/arm/mm/copypage-xscale.c
author    Russell King <rmk@dyn-67.arm.linux.org.uk>  2008-10-31 11:08:35 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2008-11-27 18:53:47 -0500
commit    063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
tree      eb2a2c1faa732c763102040478830111fc13f2a5 /arch/arm/mm/copypage-xscale.c
parent    d73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is
not only inefficient, it also causes additional complexity for highmem
support, since we convert from a struct page to a kernel direct mapped
address and back to a struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap
entries for pages which we're going to remap.

So, push the kmapping down into the copypage implementation files
where it's required.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
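To make the interface change concrete, here is a minimal sketch of the two
calling conventions (illustrative only, not code from this commit; the names
old_copy_user_page() and new_copy_user_highpage() are made up for the
comparison):

	/*
	 * Old convention: the caller kmaps both pages and passes kernel
	 * virtual addresses, so the implementation has to go back from the
	 * address to the struct page with virt_to_page() -- a round-trip
	 * that only works for direct-mapped (non-highmem) pages.
	 */
	void old_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
	{
		struct page *page = virt_to_page(kfrom);
		/* ... flush caches for 'page', then copy kfrom -> kto ... */
	}

	/*
	 * New convention: the caller passes struct page pointers and each
	 * implementation maps only the pages it actually needs to touch.
	 */
	void new_copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr)
	{
		void *kto = kmap_atomic(to, KM_USER1);
		/* ... flush caches for 'from', then copy into kto ... */
		kunmap_atomic(kto, KM_USER1);
	}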
Diffstat (limited to 'arch/arm/mm/copypage-xscale.c')
-rw-r--r--  arch/arm/mm/copypage-xscale.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad49331bbf9..01bafafce181 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -90,21 +90,24 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
@@ -133,5 +136,5 @@ xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
 	.cpu_clear_user_page	= xscale_mc_clear_user_page,
-	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
 };
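For context, the cpu_user_fns table above is how ARM dispatches page-copy
operations to per-CPU implementations. A plausible sketch of the caller side
follows; the wrapper body and the cpu_user variable name are assumptions based
on the arch/arm pattern, not part of this diff:

	/*
	 * Sketch of the dispatch side (assumed, not in this diff): each CPU
	 * type registers its cpu_user_fns at boot, and the generic ARM
	 * copy_user_highpage() simply calls through the table.
	 */
	struct cpu_user_fns {
		void (*cpu_clear_user_page)(void *p, unsigned long user);
		void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
				unsigned long vaddr);
	};

	extern struct cpu_user_fns cpu_user;	/* assumed name */

	void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr)
	{
		cpu_user.cpu_copy_user_highpage(to, from, vaddr);
	}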