aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Hansen <dave.hansen@linux.intel.com>2014-07-31 11:40:54 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2014-07-31 11:48:50 -0400
commit4995ab9cf512e9a6cc07dfd6b1d4e2fc48ce7fef (patch)
tree467624294215591ad06894e992fabd1d416b3a48
parenteff50c347fcc8feeb8c1723c23c89aba67c60263 (diff)
x86/mm: Clean up the TLB flushing code
The if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) line of code is not exactly the easiest to audit, especially when it ends up at two different indentation levels. This eliminates one of the copy-n-paste versions. It also gives us a unified exit point for each path through this function. We need this in a minute for our tracepoint. Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Link: http://lkml.kernel.org/r/20140731154054.44F1CDDC@viggo.jf.intel.com Acked-by: Rik van Riel <riel@redhat.com> Acked-by: Mel Gorman <mgorman@suse.de> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--arch/x86/mm/tlb.c23
1 files changed, 11 insertions, 12 deletions
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index dd8dda167a24..378fbef279d2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -161,23 +161,24 @@ void flush_tlb_current_task(void)
161void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 161void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
162 unsigned long end, unsigned long vmflag) 162 unsigned long end, unsigned long vmflag)
163{ 163{
164 bool need_flush_others_all = true;
164 unsigned long addr; 165 unsigned long addr;
165 unsigned act_entries, tlb_entries = 0; 166 unsigned act_entries, tlb_entries = 0;
166 unsigned long nr_base_pages; 167 unsigned long nr_base_pages;
167 168
168 preempt_disable(); 169 preempt_disable();
169 if (current->active_mm != mm) 170 if (current->active_mm != mm)
170 goto flush_all; 171 goto out;
171 172
172 if (!current->mm) { 173 if (!current->mm) {
173 leave_mm(smp_processor_id()); 174 leave_mm(smp_processor_id());
174 goto flush_all; 175 goto out;
175 } 176 }
176 177
177 if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 178 if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
178 || vmflag & VM_HUGETLB) { 179 || vmflag & VM_HUGETLB) {
179 local_flush_tlb(); 180 local_flush_tlb();
180 goto flush_all; 181 goto out;
181 } 182 }
182 183
183 /* In modern CPU, last level tlb used for both data/ins */ 184 /* In modern CPU, last level tlb used for both data/ins */
@@ -196,22 +197,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
196 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); 197 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
197 local_flush_tlb(); 198 local_flush_tlb();
198 } else { 199 } else {
200 need_flush_others_all = false;
199 /* flush range by one by one 'invlpg' */ 201 /* flush range by one by one 'invlpg' */
200 for (addr = start; addr < end; addr += PAGE_SIZE) { 202 for (addr = start; addr < end; addr += PAGE_SIZE) {
201 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); 203 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
202 __flush_tlb_single(addr); 204 __flush_tlb_single(addr);
203 } 205 }
204
205 if (cpumask_any_but(mm_cpumask(mm),
206 smp_processor_id()) < nr_cpu_ids)
207 flush_tlb_others(mm_cpumask(mm), mm, start, end);
208 preempt_enable();
209 return;
210 } 206 }
211 207out:
212flush_all: 208 if (need_flush_others_all) {
209 start = 0UL;
210 end = TLB_FLUSH_ALL;
211 }
213 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) 212 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
214 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); 213 flush_tlb_others(mm_cpumask(mm), mm, start, end);
215 preempt_enable(); 214 preempt_enable();
216} 215}
217 216