about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author	Russell King <rmk+kernel@arm.linux.org.uk>	2011-07-02 10:20:44 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-01-26 15:06:28 -0500
commit	67ece1443174d852e71c42facb3e2d7dd338c88a (patch)
tree	418359d432acfcb2ecc4c58c8afa5f73de4fa01e /arch
parent	6e78df176141f2cb673bed7fa47825e3c6a8719f (diff)
ARM: pgtable: consolidate set_pte_ext(TOP_PTE,...) + tlb flush
A number of places establish a PTE in our top page table and
immediately flush the TLB.  Rather than having this at every callsite,
provide an inline function for this purpose.

This changes some global tlb flushes to be local; each time we setup
one of these mappings, we always do it with preemption disabled which
would prevent us migrating to another CPU.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	3
-rw-r--r--	arch/arm/mm/copypage-v6.c	10
-rw-r--r--	arch/arm/mm/copypage-xscale.c	3
-rw-r--r--	arch/arm/mm/flush.c	10
-rw-r--r--	arch/arm/mm/highmem.c	13
-rw-r--r--	arch/arm/mm/mm.h	6
6 files changed, 20 insertions, 25 deletions
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index e4dc2f491123..6e06180a8bc0 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -74,8 +74,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
74 74
75 raw_spin_lock(&minicache_lock); 75 raw_spin_lock(&minicache_lock);
76 76
77 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0); 77 set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
78 flush_tlb_kernel_page(COPYPAGE_MINICACHE);
79 78
80 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 79 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
81 80
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 86524591b1b5..29c770463e41 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -90,11 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
90 kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); 90 kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
91 kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); 91 kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
92 92
93 set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0); 93 set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
94 set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0); 94 set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
95
96 flush_tlb_kernel_page(kfrom);
97 flush_tlb_kernel_page(kto);
98 95
99 copy_page((void *)kto, (void *)kfrom); 96 copy_page((void *)kto, (void *)kfrom);
100 97
@@ -119,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
119 */ 116 */
120 raw_spin_lock(&v6_lock); 117 raw_spin_lock(&v6_lock);
121 118
122 set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0); 119 set_top_pte(to, mk_pte(page, PAGE_KERNEL));
123 flush_tlb_kernel_page(to);
124 clear_page((void *)to); 120 clear_page((void *)to);
125 121
126 raw_spin_unlock(&v6_lock); 122 raw_spin_unlock(&v6_lock);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 2497dcf6d9ae..804eeddda97f 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -94,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
94 94
95 raw_spin_lock(&minicache_lock); 95 raw_spin_lock(&minicache_lock);
96 96
97 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0); 97 set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
98 flush_tlb_kernel_page(COPYPAGE_MINICACHE);
99 98
100 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 99 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
101 100
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index f4d407af4690..4d0b70f035eb 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -28,8 +28,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
28 unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 28 unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
29 const int zero = 0; 29 const int zero = 0;
30 30
31 set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); 31 set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
32 flush_tlb_kernel_page(to);
33 32
34 asm( "mcrr p15, 0, %1, %0, c14\n" 33 asm( "mcrr p15, 0, %1, %0, c14\n"
35 " mcr p15, 0, %2, c7, c10, 4" 34 " mcr p15, 0, %2, c7, c10, 4"
@@ -40,13 +39,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
40 39
41static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) 40static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
42{ 41{
43 unsigned long colour = CACHE_COLOUR(vaddr); 42 unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
44 unsigned long offset = vaddr & (PAGE_SIZE - 1); 43 unsigned long offset = vaddr & (PAGE_SIZE - 1);
45 unsigned long to; 44 unsigned long to;
46 45
47 set_pte_ext(TOP_PTE(FLUSH_ALIAS_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); 46 set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
48 to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT) + offset; 47 to = va + offset;
49 flush_tlb_kernel_page(to);
50 flush_icache_range(to, to + len); 48 flush_icache_range(to, to + len);
51} 49}
52 50
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abbe..35352517a5d4 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -71,13 +71,12 @@ void *__kmap_atomic(struct page *page)
71 */ 71 */
72 BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 72 BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
73#endif 73#endif
74 set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
75 /* 74 /*
76 * When debugging is off, kunmap_atomic leaves the previous mapping 75 * When debugging is off, kunmap_atomic leaves the previous mapping
77 * in place, so this TLB flush ensures the TLB is updated with the 76 * in place, so the contained TLB flush ensures the TLB is updated
78 * new mapping. 77 * with the new mapping.
79 */ 78 */
80 local_flush_tlb_kernel_page(vaddr); 79 set_top_pte(vaddr, mk_pte(page, kmap_prot));
81 80
82 return (void *)vaddr; 81 return (void *)vaddr;
83} 82}
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
96 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 95 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
97#ifdef CONFIG_DEBUG_HIGHMEM 96#ifdef CONFIG_DEBUG_HIGHMEM
98 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 97 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
99 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 98 set_top_pte(vaddr, __pte(0));
100 local_flush_tlb_kernel_page(vaddr);
101#else 99#else
102 (void) idx; /* to kill a warning */ 100 (void) idx; /* to kill a warning */
103#endif 101#endif
@@ -123,8 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
123#ifdef CONFIG_DEBUG_HIGHMEM 121#ifdef CONFIG_DEBUG_HIGHMEM
124 BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 122 BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
125#endif 123#endif
126 set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); 124 set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
127 local_flush_tlb_kernel_page(vaddr);
128 125
129 return (void *)vaddr; 126 return (void *)vaddr;
130} 127}
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6ee1ff2c1da6..a4e7febeb6a1 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -18,6 +18,12 @@ extern pmd_t *top_pmd;
18/* PFN alias flushing, for VIPT caches */ 18/* PFN alias flushing, for VIPT caches */
19#define FLUSH_ALIAS_START 0xffff4000 19#define FLUSH_ALIAS_START 0xffff4000
20 20
21static inline void set_top_pte(unsigned long va, pte_t pte)
22{
23 set_pte_ext(TOP_PTE(va), pte, 0);
24 local_flush_tlb_kernel_page(va);
25}
26
21static inline pmd_t *pmd_off_k(unsigned long virt) 27static inline pmd_t *pmd_off_k(unsigned long virt)
22{ 28{
23 return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); 29 return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);