about · summary · refs · log · tree · commit · diff · stats
path: root/arch/arm/mm/copypage-v6.c
diff options
context:
space:
mode:
author	Russell King <rmk+kernel@arm.linux.org.uk>	2011-07-02 10:20:44 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-26 15:06:28 -0500
commit	67ece1443174d852e71c42facb3e2d7dd338c88a (patch)
tree	418359d432acfcb2ecc4c58c8afa5f73de4fa01e /arch/arm/mm/copypage-v6.c
parent	6e78df176141f2cb673bed7fa47825e3c6a8719f (diff)
ARM: pgtable: consolidate set_pte_ext(TOP_PTE,...) + tlb flush
A number of places establish a PTE in our top page table and
immediately flush the TLB.  Rather than having this at every callsite,
provide an inline function for this purpose.

This changes some global tlb flushes to be local; each time we setup
one of these mappings, we always do it with preemption disabled which
would prevent us migrating to another CPU.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--	arch/arm/mm/copypage-v6.c	10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 86524591b1b5..29c770463e41 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -90,11 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
 	kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
 
-	set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0);
-
-	flush_tlb_kernel_page(kfrom);
-	flush_tlb_kernel_page(kto);
+	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
+	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
 
 	copy_page((void *)kto, (void *)kfrom);
 
@@ -119,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 */
 	raw_spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0);
-	flush_tlb_kernel_page(to);
+	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
 	clear_page((void *)to);
 
 	raw_spin_unlock(&v6_lock);