Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--	arch/arm/mm/copypage-v6.c	155
1 file changed, 155 insertions, 0 deletions
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
new file mode 100644
index 000000000000..694ac8208858
--- /dev/null
+++ b/arch/arm/mm/copypage-v6.c
@@ -0,0 +1,155 @@
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#if SHMLBA > 16384
#error FIX ME
#endif

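/*
 * Two scratch kernel virtual windows, each with one page slot per
 * cache colour: source pages are remapped at from_address,
 * destination pages at to_address.
 */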
#define from_address	(0xffff8000)
#define from_pgprot	PAGE_KERNEL
#define to_address	(0xffffc000)
#define to_pgprot	PAGE_KERNEL

static pte_t *from_pte;
static pte_t *to_pte;
static DEFINE_SPINLOCK(v6_lock);

#define DCACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
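/* e.g. with 4K pages and 16K SHMLBA: vaddr 0x40003000 -> colour 3 */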

/*
 * Copy the user page. No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	copy_page(kto, kfrom);
}

/*
 * Clear the user page. No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
	clear_page(kaddr);
}

/*
 * Copy the page, taking account of the cache colour.
 */
void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
	unsigned int offset = DCACHE_COLOUR(vaddr);
	unsigned long from, to;

	/*
	 * Discard data in the kernel mapping for the new page.
	 * FIXME: this relies on the MCRR instruction being supported.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
		:
		: "r" (kto),
		  "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
		: "cc");
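	/* the c6 range op invalidates the D-cache from %0 (start) to %1 (inclusive end) */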

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));

	from = from_address + (offset << PAGE_SHIFT);
	to = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(from);
	flush_tlb_kernel_page(to);

	copy_page((void *)to, (void *)from);

	spin_unlock(&v6_lock);
}

/*
 * Clear the user page. We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
	unsigned int offset = DCACHE_COLOUR(vaddr);
	unsigned long to = to_address + (offset << PAGE_SHIFT);

	/*
	 * Discard data in the kernel mapping for the new page.
	 * FIXME: this relies on the MCRR instruction being supported.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
		:
		: "r" (kaddr),
		  "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
		: "cc");

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	spin_lock(&v6_lock);

	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
	flush_tlb_kernel_page(to);
	clear_page((void *)to);

	spin_unlock(&v6_lock);
}

/*
 * Non-aliasing defaults; v6_userpage_init() below switches cpu_user
 * over to the aliasing versions when they are needed.
 */
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		pgd_t *pgd;
		pmd_t *pmd;

		pgd = pgd_offset_k(from_address);
		pmd = pmd_alloc(&init_mm, pgd, from_address);
		if (!pmd)
			BUG();
		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
		if (!from_pte)
			BUG();

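		/* to_address sits under the same pmd, so reuse it here */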
		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
		if (!to_pte)
			BUG();

		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
	}

	return 0;
}

__initcall(v6_userpage_init);
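
A minimal stand-alone sketch of the colour arithmetic above, for reference.
The window addresses match the patch; the sample user address and the 4K
page / 16K SHMLBA values are illustrative assumptions, not part of it:

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumed 4K pages */
	#define SHMLBA		(4UL << PAGE_SHIFT)	/* assumed 16K: 4 colours */
	#define DCACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

	int main(void)
	{
		unsigned long vaddr = 0x40003000UL;	/* hypothetical user address */
		unsigned long offset = DCACHE_COLOUR(vaddr);

		/* the scratch mappings the kernel would pick for this colour */
		printf("colour %lu: copy from 0x%lx to 0x%lx\n", offset,
		       0xffff8000UL + (offset << PAGE_SHIFT),
		       0xffffc000UL + (offset << PAGE_SHIFT));
		return 0;
	}

This prints "colour 3: copy from 0xffffb000 to 0xfffff000": every virtual
alias of a page whose address has the same colour lands in the same D-cache
sets, which is why copying through these windows keeps the destination
page's cache lines coherent with the user's view of it.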