path: root/arch/ia64
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-25 19:05:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-25 19:05:40 -0400
commit		1cf35d47712dd5dc4d62c6ce984f04ac6eab0408 (patch)
tree		f00857df7a2eec9520c1a950a0f9ae16cdfc4627 /arch/ia64
parent		9a60ee117bbeaf2fb9a02ea80a6bdbc2811ca4d2 (diff)
mm: split 'tlb_flush_mmu()' into tlb flushing and memory freeing parts
The mmu-gather operation 'tlb_flush_mmu()' has done two things: the actual tlb flush operation, and the batched freeing of the pages that the TLB entries pointed at.

This splits the operation into separate phases, so that the forced batched flushing done by zap_pte_range() can now do the actual TLB flush while still holding the page table lock, but delay the batched freeing of all the pages to after the lock has been dropped.

This in turn allows us to avoid a race condition between set_page_dirty() (as called by zap_pte_range() when it finds a dirty shared memory pte) and page_mkclean(): because we now flush all the dirty page data from the TLB's while holding the pte lock, page_mkclean() will be held up walking the (recently cleaned) page tables until after the TLB entries have been flushed from all CPU's.

Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Dave Hansen <dave.hansen@intel.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
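For illustration, a minimal userspace sketch of the ordering this split makes possible follows; the mock mmu_gather, the pte_lock mutex, and the printf tracing are stand-ins invented for this example, not the kernel's actual structures or locking API, and they only model the flush-then-unlock-then-free sequence described above:

    /*
     * Illustrative userspace model of the two-phase mmu-gather teardown:
     * invalidate TLB entries while the page-table lock is still held,
     * and free the gathered pages only after that lock is dropped.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PAGES 8

    struct mmu_gather {                       /* mock of the kernel structure */
            void *pages[MAX_PAGES];
            unsigned int nr;
            int need_flush;
    };

    static pthread_mutex_t pte_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for the pte lock */

    static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
    {
            tlb->need_flush = 0;              /* models phase 1: TLB invalidation only */
            printf("TLB flushed while pte lock is held\n");
    }

    static void tlb_flush_mmu_free(struct mmu_gather *tlb)
    {
            unsigned int i;

            for (i = 0; i < tlb->nr; i++)     /* models phase 2: release the gathered pages */
                    free(tlb->pages[i]);
            tlb->nr = 0;
            printf("gathered pages freed after unlock\n");
    }

    int main(void)
    {
            struct mmu_gather tlb = { .nr = 0, .need_flush = 1 };

            pthread_mutex_lock(&pte_lock);
            tlb.pages[tlb.nr++] = malloc(4096);  /* "unmap" a page: gather it for later freeing */
            tlb_flush_mmu_tlbonly(&tlb);         /* flush before the lock is dropped */
            pthread_mutex_unlock(&pte_lock);
            tlb_flush_mmu_free(&tlb);            /* the batched freeing happens unlocked */
            return 0;
    }

The only point of the model is the ordering: the TLB-invalidation phase runs before the lock is released, while the comparatively expensive batched freeing is deferred until after the unlock. The ia64 header change below adapts that architecture's mmu-gather helpers to this split.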
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/tlb.h	42
1 file changed, 32 insertions, 10 deletions
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index bc5efc7c3f3f..39d64e0df1de 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	unsigned long i;
-	unsigned int nr;
-
-	if (!tlb->need_flush)
-		return;
 	tlb->need_flush = 0;
 
 	if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
 	}
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	unsigned long i;
+	unsigned int nr;
+
 	/* lastly, release the freed pages */
 	nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 		free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
 	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 	return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);