author     Dave Hansen <dave.hansen@linux.intel.com>    2014-07-31 11:40:56 -0400
committer  H. Peter Anvin <hpa@linux.intel.com>         2014-07-31 11:48:50 -0400
commit     9dfa6dee5355f200cf19528ca7c678ef4007cec5
tree       a42ed7ec7cc9e9bb3f27507b89c0fbd3a524d2ce
parent     e9f4e0a9fe2723078b7a1a1169828dd46a7b2f9e
x86/mm: Fix missed global TLB flush stat
If we take the

	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
		local_flush_tlb();
		goto out;
	}

path out of flush_tlb_mm_range(), we will have flushed the tlb,
but not incremented NR_TLB_LOCAL_FLUSH_ALL.

This unifies the way out of the function so that we always take a
single path when doing a full tlb flush.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: http://lkml.kernel.org/r/20140731154056.FF763B76@viggo.jf.intel.com
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--  arch/x86/mm/tlb.c  15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index dff6ddebc45f..ae584d09e8b0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -164,8 +164,9 @@ unsigned long tlb_single_page_flush_ceiling = 1;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
-	int need_flush_others_all = 1;
 	unsigned long addr;
+	/* do a global flush by default */
+	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
 	if (current->active_mm != mm)
@@ -176,16 +177,14 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		goto out;
 	}
 
-	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
-		local_flush_tlb();
-		goto out;
-	}
+	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-	if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+		base_pages_to_flush = TLB_FLUSH_ALL;
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		need_flush_others_all = 0;
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
@@ -193,7 +192,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		}
 	}
 out:
-	if (need_flush_others_all) {
+	if (base_pages_to_flush == TLB_FLUSH_ALL) {
 		start = 0UL;
 		end = TLB_FLUSH_ALL;
 	}
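
For reference, here is a simplified sketch of what flush_tlb_mm_range() looks like after this patch. It is reconstructed from the hunks above rather than quoted from the file: the early-out paths for foreign mms and the remote-flush tail are elided. The reason a single exit path suffices is that TLB_FLUSH_ALL is defined as -1UL on x86, so the default base_pages_to_flush can never be under tlb_single_page_flush_ceiling, and every full flush is forced through the branch that bumps the counter.

	void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
	{
		unsigned long addr;
		/* do a global flush by default; -1UL always exceeds the ceiling */
		unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

		preempt_disable();
		/* ... early-out paths ('goto out') elided ... */

		/* only a bounded, non-huge range is a candidate for invlpg */
		if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
			base_pages_to_flush = (end - start) >> PAGE_SHIFT;

		if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
			/* the one and only full-flush path: the stat cannot be missed */
			base_pages_to_flush = TLB_FLUSH_ALL;
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
			local_flush_tlb();
		} else {
			/* flush range by one by one 'invlpg' */
			for (addr = start; addr < end; addr += PAGE_SIZE) {
				count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
				__flush_tlb_one(addr);
			}
		}
	out:
		if (base_pages_to_flush == TLB_FLUSH_ALL) {
			start = 0UL;
			end = TLB_FLUSH_ALL;
		}
		/* ... flush_tlb_others() / preempt_enable() tail elided ... */
	}

Note that the counters behind count_vm_tlb_event() are only compiled in under CONFIG_DEBUG_TLBFLUSH; with that option enabled, the counter fixed here should show up as nr_tlb_local_flush_all in /proc/vmstat.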