| field | value | date |
|---|---|---|
| author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2005-10-30 14:03:21 -0500 |
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2005-10-30 14:03:21 -0500 |
| commit | b4c2803ca8ad7bb1aad215d89532e24488e9e68e (patch) | |
| tree | 719a557c8f30724270d59a425c33fc0d1db067ed /arch/arm/mm | |
| parent | d362979aa2b031b91ee12122e5c4cad89577d8d3 (diff) | |
[ARM] Make v6 copypage function static and cleanup pgprots
We know what pgprot we're going to use, so don't #define it. Also,
since we select the nonaliasing/aliasing copypage implementation at
run time, there's no point having it globally visible.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
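The run-time selection the message refers to happens elsewhere in copypage-v6.c, not in this patch. A minimal sketch of the pattern, assuming the `cpu_user_fns`/`cpu_user` hooks and the `cache_is_vipt_aliasing()` test used by the ARM mm code of this era (names and details here are illustrative, not copied from the diff):

```c
/* Illustrative sketch of the boot-time selection -- not part of this patch. */
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	/* On a VIPT aliasing cache, switch to the colour-aware versions. */
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
	}
	return 0;
}

core_initcall(v6_userpage_init);
```

With the choice made through `cpu_user` at boot, nothing outside this file calls the `v6_*` functions by name, which is why the patch can mark them `static`.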
Diffstat (limited to 'arch/arm/mm')
| -rw-r--r-- | arch/arm/mm/copypage-v6.c | 16 |

1 file changed, 7 insertions(+), 9 deletions(-)
```diff
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 27d041574ea7..269ce6913ee9 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -22,9 +22,7 @@
 #endif
 
 #define from_address	(0xffff8000)
-#define from_pgprot	PAGE_KERNEL
 #define to_address	(0xffffc000)
-#define to_pgprot	PAGE_KERNEL
 
 #define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
 
@@ -34,7 +32,7 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
 {
 	copy_page(kto, kfrom);
 }
@@ -43,7 +41,7 @@ void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long v
  * Clear the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
 {
 	clear_page(kaddr);
 }
@@ -51,7 +49,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
 /*
  * Copy the page, taking account of the cache colour.
  */
-void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;
@@ -72,8 +70,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));
 
 	from = from_address + (offset << PAGE_SHIFT);
 	to = to_address + (offset << PAGE_SHIFT);
@@ -91,7 +89,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
@@ -112,7 +110,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
```
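As background for the colour arithmetic in the aliasing paths above: the scratch windows at `from_address`/`to_address` hold one page slot per cache colour, and `CACHE_COLOUR(vaddr)` picks the slot that matches the user mapping's colour, so the kernel-side copy hits the same cache lines. A minimal userspace sketch of that arithmetic, assuming four page colours (`SHMLBA = 4 * PAGE_SIZE`, as on aliasing ARMv6 caches) and the usual `CACHE_COLOUR` definition from the ARM mm headers of this era:

```c
#include <stdio.h>

/* Assumed values: 4 KB pages, four cache colours (ARMv6 VIPT aliasing). */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

/* Scratch windows from the patch above. */
#define from_address	0xffff8000UL
#define to_address	0xffffc000UL

int main(void)
{
	unsigned long vaddr = 0x4000a000UL;		/* example user address */
	unsigned long offset = CACHE_COLOUR(vaddr);	/* bits 13:12 -> colour 2 */

	/* The temporary mappings land in the matching colour slot:
	 * from = 0xffffa000, to = 0xffffe000 for this vaddr. */
	printf("colour %lu -> from %#lx, to %#lx\n", offset,
	       from_address + (offset << PAGE_SHIFT),
	       to_address + (offset << PAGE_SHIFT));
	return 0;
}
```

Because both mappings share the user page's colour, the `set_pte()`/`flush_tlb_kernel_page()` pair above can copy or clear through the kernel window without creating a cache alias that would leave stale lines visible to userspace.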