| author | Alex Shi <alex.shi@intel.com> | 2012-06-27 21:02:24 -0400 |
|---|---|---|
| committer | H. Peter Anvin <hpa@zytor.com> | 2012-06-27 22:29:14 -0400 |
| commit | effee4b9b3b0aa5770bcd98de5f672b05b27703c (patch) | |
| tree | b167657ec2ba05797b925a93e7e1b45222ac5ac3 /arch/x86/mm | |
| parent | 52aec3308db85f4e9f5c8b9f5dc4fbd0138c6fa4 (diff) | |
x86/tlb: do flush_tlb_kernel_range by 'invlpg'
This patch does flush_tlb_kernel_range by 'invlpg'. The performance
cost and gain were analyzed in the previous patch
(x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range),
with testing reported at: http://lkml.org/lkml/2012/6/21/10
The cost is mostly hidden by the long kernel path, but the gain is
still quite clear: memory access in a user application can increase
by 30+% while the kernel executes this function.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-10-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
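To make the heuristic described in the commit message concrete, below is a minimal standalone C sketch of the range-vs-full-flush decision. This is not the kernel code itself: the entry count (512), the shift value (5), and the should_flush_all() helper are illustrative assumptions standing in for tlb_lld_4k[ENTRIES] and tlb_flushall_shift.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define TLB_FLUSH_ALL	(~0UL)

/* Illustrative stand-ins for tlb_lld_4k[ENTRIES] and tlb_flushall_shift. */
static unsigned tlb_entries = 512;
static int flushall_shift = 5;

/*
 * Hypothetical helper mirroring the patch's condition: a full flush
 * wins once the range covers more pages than tlb_entries >> shift.
 */
static int should_flush_all(unsigned long start, unsigned long end)
{
	if (end == TLB_FLUSH_ALL || flushall_shift == -1)
		return 1;
	return ((end - start) >> PAGE_SHIFT) > (tlb_entries >> flushall_shift);
}

int main(void)
{
	/* 8 pages: below the 512 >> 5 = 16-page threshold, flush per page */
	printf("8 pages  -> full flush? %d\n",
	       should_flush_all(0, 8 * PAGE_SIZE));
	/* 64 pages: above the threshold, a global flush is cheaper */
	printf("64 pages -> full flush? %d\n",
	       should_flush_all(0, 64 * PAGE_SIZE));
	return 0;
}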
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- arch/x86/mm/tlb.c | 30
1 file changed, 30 insertions, 0 deletions
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2b5f506a7655..613cd83e8c0c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* flush the range one page at a time with 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* on modern CPUs the last-level TLB holds both data and instructions */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* balance as in a user-space task's flush, a bit conservative */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
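A note on the design visible in the hunk above: kernel page-table mappings are global and visible to every CPU, so unlike the user-space flush paths (which can be limited to the CPUs actually using a given mm) both branches must run on every CPU via on_each_cpu(); the heuristic only changes the per-CPU work, from one global flush to one 'invlpg' per page.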