author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-26 16:17:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-26 16:17:17 -0400
commit     4cb38750d49010ae72e718d46605ac9ba5a851b4 (patch)
tree       8c991a900fd176288f4acbc340512b90d604374d /mm
parent     0a2fe19ccc4bc552a8083a595a3aa737b8bea727 (diff)
parent     7efa1c87963d23cc57ba40c07316d3e28cc75a3a (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/mm changes from Peter Anvin:
"The big change here is the patchset by Alex Shi to use INVLPG to flush
only the affected pages when we only need to flush a small page range.
It also removes the special INVALIDATE_TLB_VECTOR interrupts (32
vectors!) and replaces them with an ordinary IPI function call."
Fix up trivial conflicts in arch/x86/include/asm/apic.h (added code next
to changed line)
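
The core idea of the series is a size heuristic: small ranges are invalidated page by page with INVLPG, larger ones with a full TLB flush. A minimal sketch of that heuristic follows; sketch_flush_tlb_range() and its 'shift' parameter are illustrative stand-ins for the series' flush_tlb_range() rework and the per-CPU tlb_flushall_shift value, not the actual kernel code.

/* Illustrative sketch only -- not the x86 code from this series.
 * PAGE_SHIFT/PAGE_SIZE are the usual kernel constants; 'shift'
 * stands in for the per-CPU tlb_flushall_shift tunable. */
static void sketch_flush_tlb_range(unsigned long start, unsigned long end,
				   int shift)
{
	unsigned long addr;

	/* Large range: one global flush is cheaper than many INVLPGs. */
	if (((end - start) >> PAGE_SHIFT) > (1UL << shift)) {
		__flush_tlb();		/* full flush via CR3 reload */
		return;
	}

	/* Small range: invalidate only the affected pages. */
	for (addr = start; addr < end; addr += PAGE_SIZE)
		asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}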
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/tlb: Fix build warning and crash when building for !SMP
x86/tlb: do flush_tlb_kernel_range by 'invlpg'
x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR
x86/tlb: enable tlb flush range support for x86
mm/mmu_gather: enable tlb flush range in generic mmu_gather
x86/tlb: add tlb_flushall_shift knob into debugfs
x86/tlb: add tlb_flushall_shift for specific CPU
x86/tlb: fall back to flush all when meet a THP large page
x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range
x86/tlb_info: get last level TLB entry number of CPU
x86: Add read_mostly declaration/definition to variables from smp.h
x86: Define early read-mostly per-cpu macros
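
The INVALIDATE_TLB_VECTOR removal called out in the message above amounts to routing remote flushes through the generic SMP function-call IPI. A hedged sketch follows: smp_call_function_many() is the real cross-CPU call API, while flush_tlb_func() and its info payload are illustrative stand-ins for the series' actual callback and argument.

#include <linux/smp.h>

/* Illustrative sketch only. */
static void flush_tlb_func(void *info)
{
	/* Runs on every targeted CPU: flush the requested range
	 * locally (full flush or per-page INVLPG, as above). */
}

static void flush_tlb_others_sketch(const struct cpumask *cpumask,
				    void *info)
{
	/* One ordinary function-call IPI replaces the 32 dedicated
	 * INVALIDATE_TLB_VECTOR interrupt vectors. */
	smp_call_function_many(cpumask, flush_tlb_func, info, 1);
}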
Diffstat (limited to 'mm')
 mm/memory.c | 9 +++++++++
 1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2466d1250231..91f69459d3e8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -206,6 +206,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
 	tlb->mm = mm;
 
 	tlb->fullmm = fullmm;
+	tlb->start = -1UL;
+	tlb->end = 0;
 	tlb->need_flush = 0;
 	tlb->fast_mode = (num_possible_cpus() == 1);
 	tlb->local.next = NULL;
@@ -248,6 +250,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
+	tlb->start = start;
+	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1204,6 +1208,11 @@ again:
 	 */
 	if (force_flush) {
 		force_flush = 0;
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+		tlb->start = addr;
+		tlb->end = end;
+#endif
 		tlb_flush_mmu(tlb);
 		if (addr != end)
 			goto again;
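
The mm/memory.c hunks above make the generic mmu_gather record the virtual range being unmapped, so an architecture's flush hook can invalidate only that range. A rough sketch of how arch code might consume it: tlb_flush_sketch() and flush_range() are hypothetical names, flush_tlb_mm() is the real fallback API, and the start > end test relies on the -1UL/0 "no range recorded" initialization from tlb_gather_mmu() above.

/* Illustrative sketch: consuming the range recorded by the generic
 * mmu_gather. Not the actual x86 hook from this series. */
static void tlb_flush_sketch(struct mmu_gather *tlb)
{
	/* start == -1UL, end == 0 (set in tlb_gather_mmu()) means no
	 * range was recorded, so only a full flush is safe. */
	if (tlb->fullmm || tlb->start > tlb->end)
		flush_tlb_mm(tlb->mm);			/* flush everything */
	else
		flush_range(tlb->mm, tlb->start, tlb->end);
}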