author		Alex Shi <alex.shi@intel.com>		2012-06-27 21:02:21 -0400
committer	H. Peter Anvin <hpa@zytor.com>		2012-06-27 22:29:11 -0400
commit		597e1c3580b7cfd95bb0f3167e2b297bf8a5a3ae (patch)
tree		d67c27b8505af6f7f21fa918599c2e9d37c2e5e1 /mm/memory.c
parent		3df3212f9722c7e45c723b9ea231a04ba4dbc47c (diff)
mm/mmu_gather: enable tlb flush range in generic mmu_gather
This patch enables TLB flush range support in the generic mmu_gather layer. Most architectures already have their own TLB flush range support, e.g. ARM and IA64. X86 has no such support in hardware yet, but the 'invlpg' instruction can implement it to some degree, so enable this feature in the generic layer for x86 now; it may also be useful for other architectures in the future.

The generic mmu_gather struct is protected by the macro HAVE_GENERIC_MMU_GATHER. Architectures that already support flush ranges have their own mmu_gather structs, so this change is safe for them. In the future we may unify this struct and the related functions across architectures.

Thanks to Peter Zijlstra for his repeated reminders about keeping the code safe across multiple architectures!

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-7-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
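For illustration only (this is not part of the patch below), an architecture-specific tlb_flush() could consume the range that the generic mmu_gather now tracks roughly as follows; flush_tlb_mm_range() is assumed to be the x86 range-flush helper added elsewhere in this series:

	/*
	 * Hedged sketch, not part of this patch: how an architecture-specific
	 * tlb_flush() could consume the range tracked by the generic mmu_gather.
	 * flush_tlb_mm_range() is assumed to be the x86 helper introduced
	 * elsewhere in this series.
	 */
	static inline void tlb_flush(struct mmu_gather *tlb)
	{
		/* start > end means "nothing gathered": fall back to a full flush */
		if (tlb->fullmm || tlb->start > tlb->end)
			flush_tlb_mm(tlb->mm);
		else
			flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);
	}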
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1b7dc662bf9f..32c99433cfdf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;
 
 	tlb->fullmm = fullmm;
+	tlb->start = -1UL;
+	tlb->end = 0;
 	tlb->need_flush = 0;
 	tlb->fast_mode = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
+	tlb->start = start;
+	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ again:
 	 */
 	if (force_flush) {
 		force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+		tlb->start = addr;
+		tlb->end = end;
+#endif
 		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
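As a closing note on the hunks above: initializing start to -1UL and end to 0 in tlb_gather_mmu() encodes an empty range, which the gather path is then expected to widen as entries are collected. A minimal sketch of that widening idea, using a hypothetical helper name (the real hook belongs in include/asm-generic/tlb.h, which is outside this diffstat):

	/*
	 * Hedged sketch with a hypothetical name (tlb_track_range); the actual
	 * widening is done by the mmu_gather hooks in include/asm-generic/tlb.h,
	 * not by mm/memory.c.  min()/max() come from <linux/kernel.h>.
	 */
	#define tlb_track_range(tlb, address)					\
		do {								\
			(tlb)->start = min((tlb)->start, (address));		\
			(tlb)->end   = max((tlb)->end, (address) + PAGE_SIZE);	\
		} while (0)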