| author    | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2005-05-10 12:30:47 -0400 |
|-----------|--------------------------------------------|---------------------------|
| committer | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2005-05-10 12:30:47 -0400 |
| commit    | 08ee4e4c5fd3fb0857eeb6a5a0ff66881432e8a3   |                           |
| tree      | a4c70b10a7bbd4dd5606a5d5122d98fd2c82b229   |                           |
| parent    | d2bab05ac1f9a2f5ddcd2f3256237e5c47fc127f   |                           |
[PATCH] ARM: Use top_pmd for V6 copy/clear user_page
Remove the needless page table walking previously done at init time for the V6 copy/clear user-page operations; the PTEs for the fixed scratch mappings are now looked up through top_pmd instead.
Signed-off-by: Russell King <rmk@arm.linux.org.uk>
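For context (not part of the patch text itself): the deleted init-time code walked and allocated kernel page tables once at boot just to cache two PTE pointers, `from_pte` and `to_pte`. Both scratch addresses sit in the top of the kernel address space, which a single pmd entry already covers, so the PTE can instead be derived on demand from the cached `top_pmd`. A minimal sketch of the two approaches, assuming `top_pmd` is initialised elsewhere in the ARM mm code to point at that pmd entry (its setup is not shown in this patch):

```c
/*
 * Sketch of the removed init-time walk (error handling via BUG()
 * omitted): three steps from the kernel pgd down to the PTE.
 */
pgd_t *pgd = pgd_offset_k(from_address);              /* first-level entry  */
pmd_t *pmd = pmd_alloc(&init_mm, pgd, from_address);  /* second-level table */
pte_t *from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);

/*
 * The replacement: a single lookup through the pre-located top_pmd.
 * Assumption: top_pmd covers both from_address and to_address, so the
 * same macro serves both mappings.
 */
#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
```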
| -rw-r--r-- | arch/arm/mm/copypage-v6.c | 28 |
1 file changed, 6 insertions(+), 22 deletions(-)
```diff
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 694ac8208858..a8c00236bd3d 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -26,8 +26,8 @@
 #define to_address	(0xffffc000)
 #define to_pgprot	PAGE_KERNEL
 
-static pte_t *from_pte;
-static pte_t *to_pte;
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
 static DEFINE_SPINLOCK(v6_lock);
 
 #define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -74,8 +74,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
 
 	from = from_address + (offset << PAGE_SHIFT);
 	to = to_address + (offset << PAGE_SHIFT);
@@ -114,7 +114,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -129,21 +129,6 @@ struct cpu_user_fns v6_user_fns __initdata = {
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		pgd_t *pgd;
-		pmd_t *pmd;
-
-		pgd = pgd_offset_k(from_address);
-		pmd = pmd_alloc(&init_mm, pgd, from_address);
-		if (!pmd)
-			BUG();
-		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-		if (!from_pte)
-			BUG();
-
-		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-		if (!to_pte)
-			BUG();
-
 		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
 		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
 	}
@@ -151,5 +136,4 @@ static int __init v6_userpage_init(void)
 	return 0;
 }
 
-__initcall(v6_userpage_init);
-
+core_initcall(v6_userpage_init);
```
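A second, quieter change rides along at the bottom of the diff: registration moves from plain `__initcall()` to `core_initcall()`, presumably so the aliasing copy/clear helpers are installed as early in boot as possible. A reminder of the initcall ordering, paraphrased from `include/linux/init.h` (the exact levels should be checked against the kernel version at hand):

```c
/*
 * Initcall levels run in ascending order during boot:
 *
 *   core_initcall       level 1   <- now used for v6_userpage_init()
 *   postcore_initcall   level 2
 *   arch_initcall       level 3
 *   subsys_initcall     level 4
 *   fs_initcall         level 5
 *   device_initcall     level 6   <- what plain __initcall() expands to
 *   late_initcall       level 7
 */
core_initcall(v6_userpage_init);
```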
