path: root/arch/x86/mm
author	Alex Shi <alex.shi@intel.com>	2012-06-27 21:02:19 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2012-06-27 22:29:10 -0400
commit	c4211f42d3e66875298a5e26a75109878c80f15b (patch)
tree	5f4db23b52be8eb74f95c35621373df790eacdd2 /arch/x86/mm
parent	d8dfe60d6dcad5989c4558b753b98d657e2813c0 (diff)
x86/tlb: add tlb_flushall_shift for specific CPU
Testing shows that different CPU types (micro-architectures and NUMA modes) have different balance points between flushing the whole TLB and issuing multiple invlpg instructions, and there are also cases where per-page flushing brings no benefit at all. This patch adds an interface that lets x86 vendor developers set a different shift for each CPU type. On the machines at hand, the balance point is 16 entries on Romley-EP, 8 entries on Bloomfield NHM-EP, and 256 entries on an IVB mobile CPU, while on a model 15 Core2 Xeon using invlpg does not help at all. For untested machines, a conservative setting is used, the same as for NHM CPUs.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-5-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
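For illustration only, the snippet below sketches the kind of per-CPU-type selection the commit message describes. It is a minimal stand-alone user-space model, not the kernel code: the enum names, the function name and the non-negative shift values are hypothetical placeholders; only the "-1 on the Core2 Xeon" case and the "default to the conservative NHM setting" rule come from the message above.

#include <stdio.h>

/* Hypothetical CPU identifiers, for illustration only. */
enum cpu_type { CPU_CORE2_XEON, CPU_NHM_EP, CPU_ROMLEY_EP, CPU_IVB_MOBILE, CPU_UNTESTED };

/*
 * Pick a tlb_flushall_shift per CPU type.  A shift of -1 means invlpg
 * never pays off on that CPU, so flush_tlb_range() falls back to a full
 * flush; untested CPUs get the conservative NHM value.  The non-negative
 * numbers here are placeholders, not the values the kernel actually uses.
 */
static int pick_tlb_flushall_shift(enum cpu_type type)
{
	switch (type) {
	case CPU_CORE2_XEON:
		return -1;	/* invlpg brings no benefit on this model */
	case CPU_ROMLEY_EP:
		return 4;	/* placeholder shift */
	case CPU_IVB_MOBILE:
		return 8;	/* placeholder shift */
	case CPU_NHM_EP:
	default:
		return 3;	/* conservative default, same as NHM */
	}
}

int main(void)
{
	printf("shift for an untested CPU: %d\n",
	       pick_tlb_flushall_shift(CPU_UNTESTED));
	return 0;
}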
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/tlb.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 184a02a4d871..2939f2f9edbb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -316,8 +316,6 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_enable();
 }
 
-#define FLUSHALL_BAR	16
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long has_large_page(struct mm_struct *mm,
 				unsigned long start, unsigned long end)
@@ -352,7 +350,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm;
 
-	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
 flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
@@ -373,7 +371,8 @@ flush_all:
 	act_entries = tlb_entries > mm->total_vm ?
 					mm->total_vm : tlb_entries;
 
-	if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
+	if ((end - start) >> PAGE_SHIFT >
+					act_entries >> tlb_flushall_shift)
 		local_flush_tlb();
 	else {
 		if (has_large_page(mm, start, end)) {
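As a usage note, the stand-alone sketch below mirrors the new condition above: the range is flushed page by page only while its size in pages stays below the task's active TLB entries scaled down by tlb_flushall_shift, and a shift of -1 short-circuits to a full flush. The function name and the sample numbers are illustrative assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-CPU tunable added by this patch. */
static int tlb_flushall_shift = 4;

/* Model of the flush_tlb_range() decision: true means flush the whole TLB. */
static bool should_flush_all(unsigned long range_pages, unsigned long act_entries)
{
	if (tlb_flushall_shift == -1)
		return true;	/* invlpg never helps on this CPU */

	return range_pages > (act_entries >> tlb_flushall_shift);
}

int main(void)
{
	unsigned long act_entries = 64;	/* sample TLB footprint, in entries */

	/* With shift 4, ranges above 64 >> 4 = 4 pages trigger a full flush. */
	for (unsigned long pages = 1; pages <= 64; pages <<= 1)
		printf("%2lu pages -> %s\n", pages,
		       should_flush_all(pages, act_entries) ?
		       "flush all" : "invlpg loop");
	return 0;
}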