author		David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
commit		353816f43d1fb340ff2d9a911dd5d0799c09f6a5
tree		517290fd884d286fe2971137ac89f89e3567785a	/arch/arm/mm/copypage-v4mc.c
parent		160bbab3000dafccbe43688e48208cecf4deb879
parent		fe0bdec68b77020281dc814805edfe594ae89e0f
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/poodle.c
arch/arm/mach-pxa/spitz.c
Diffstat (limited to 'arch/arm/mm/copypage-v4mc.c')
-rw-r--r--	arch/arm/mm/copypage-v4mc.c | 53 ++++++++++++++++++++++++++++-------------------------
1 file changed, 28 insertions(+), 25 deletions(-)
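The change to this file comes in from the merged mainline branch: it is part of the conversion of the ARM copypage backends from the old kernel-virtual-address hooks (cpu_clear_user_page/cpu_copy_user_page) to struct page based highpage hooks, so the copy and clear paths can also handle highmem pages that have no permanent kernel mapping. For orientation, the hook table these functions populate looks roughly like this after the conversion (a sketch after arch/arm/include/asm/page.h of this kernel generation, quoted from memory rather than from this diff):

	struct cpu_user_fns {
		void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
		void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
				unsigned long vaddr);
	};

	extern struct cpu_user_fns cpu_user;

Each ARM CPU class registers its own implementation pair; v4_mc_user_fns at the bottom of this file is the registration for ARMv4 cores with a mini-cache.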
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e2549344..bdb5fd983b15 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
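(The include swap above is mechanical: kmap_atomic()/kunmap_atomic() and the KM_USER* slot constants used further down come in through <linux/highmem.h>, while nothing here needs <asm/page.h> directly any more — PAGE_SIZE still arrives indirectly via <linux/mm.h>.)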
@@ -33,7 +33,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
 static void __attribute__((naked))
 mc_copy_user_page(void *from, void *to)
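The comment block above describes the core trick: invalidate each destination D-cache line immediately before storing through it, so a read-allocate D-cache never pulls the destination in and evicts the source. As an illustration of just that flush-before-write half, here is a minimal sketch — this is not the file's actual mc_copy_user_page (whose unrolled loop sits in the context elided between hunks), and the function name and the 32-byte line size are assumptions:

	/* Illustrative sketch only: copy one page 32 bytes at a time on an
	 * assumed ARMv4 core with 32-byte D-cache lines, invalidating each
	 * destination line ("mcr p15, 0, rX, c7, c6, 1") before writing it.
	 */
	static void flush_before_write_copy(void *from, void *to)
	{
		unsigned int lines = PAGE_SIZE / 32;

		asm volatile("\
	1:	mcr	p15, 0, %1, c7, c6, 1	@ invalidate dest D line\n\
		ldmia	%0!, {r2, r3, r4, r5}	@ load 16 bytes\n\
		stmia	%1!, {r2, r3, r4, r5}	@ store 16 bytes\n\
		ldmia	%0!, {r2, r3, r4, r5}	@ load next 16 bytes\n\
		stmia	%1!, {r2, r3, r4, r5}	@ store next 16 bytes\n\
		subs	%2, %2, #1		@ one line done\n\
		bne	1b"
		: "+r" (from), "+r" (to), "+r" (lines)
		: : "r2", "r3", "r4", "r5", "cc", "memory");
	}

The other half of the mini-cache trick — mapping the *source* page through minicache_pgprot so its lines allocate into the mini-cache instead of the main D-cache — is visible in the next hunk.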
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * ARMv4 optimised clear_user_page
  */
-void __attribute__((naked))
-v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	asm volatile(
-	"str	lr, [sp, #-4]!\n\
-	mov	r1, %0				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
 	mov	lr, #0				@ 1\n\
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
-	bne	1b				@ 1\n\
-	ldr	pc, [sp], #4"
-	:
-	: "I" (PAGE_SIZE / 64));
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= v4_mc_clear_user_page,
-	.cpu_copy_user_page	= v4_mc_copy_user_page,
+	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
 };
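For contrast, the generic architecture-neutral fallback simply kmaps both pages and delegates to copy_user_page. A sketch after include/linux/highmem.h of this kernel generation, quoted from memory:

	#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
	static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr)
	{
		char *vfrom, *vto;

		vfrom = kmap_atomic(from, KM_USER0);	/* map source */
		vto = kmap_atomic(to, KM_USER1);	/* map destination */
		copy_user_page(vto, vfrom, vaddr, to);
		kunmap_atomic(vto, KM_USER1);
		kunmap_atomic(vfrom, KM_USER0);
	}
	#endif

ARM opts out of this fallback and routes copy_user_highpage() through the cpu_user function table instead, which is why v4_mc_copy_user_highpage above kmaps only the destination (with the KM_USER1 slot): the source is deliberately not kmapped but mapped through the dedicated PTE at 0xffff8000 with minicache_pgprot, so source reads allocate into the mini-cache rather than the main D-cache. The clear path similarly gains a kmap_atomic()/kunmap_atomic() pair, and its asm drops the naked-function prologue/epilogue tricks in favour of proper operands and a clobber list, letting the compiler manage the frame.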