author     Dave Hansen <dave@sr71.net>                     2013-09-11 17:20:24 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 18:57:09 -0400
commit     6df46865ff8715932e7d42e52cac17e8461758cb
tree       7c7e1d43b22a2bec2d4a6fce95ddc3cbd481aa1e  /arch/x86
parent     9824cf9753ecbe8f5b47aa9b2f218207defea211
mm: vmstats: track TLB flush stats on UP too
The previous patch adding vmstat counters for TLB flushes ("mm: vmstats:
tlb flush counters") effectively missed UP, since arch/x86/mm/tlb.c is only
compiled for SMP.
UP systems do not do remote TLB flushes, so compile those counters out on
UP.
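For reference, "compile those counters out" amounts to hiding the remote-flush
events behind CONFIG_SMP.  A minimal sketch, assuming the counters sit in the
vm_event_item enum where the earlier patch put them (that file is outside the
arch/x86 diffstat shown below, and the remote event names are assumed from the
earlier counters patch rather than taken from this one):

#ifdef CONFIG_SMP
	NR_TLB_REMOTE_FLUSH,		/* this cpu tried to flush other cpus' tlbs */
	NR_TLB_REMOTE_FLUSH_RECEIVED,	/* this cpu received a flush ipi */
#endif /* CONFIG_SMP */
	NR_TLB_LOCAL_FLUSH_ALL,
	NR_TLB_LOCAL_FLUSH_ONE,
	NR_TLB_LOCAL_FLUSH_ONE_KERNEL,

With that guard in place, UP builds only ever see the local-flush events, which
are the ones the header changes below bump.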
arch/x86/kernel/cpu/mtrr/generic.c calls __flush_tlb() directly.  This is
probably an optimization: the mtrr code toggles cr4 itself, so it does not
need the cr4-based global flush that flush_tlb_all() would end up doing.  It
would probably be safe to make that a flush_tlb_all() (and then get these
statistics), but the mtrr code is ancient and I'm hesitant to touch it other
than to just stick in the counters.
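To make the trade-off concrete, a rough sketch of the two choices for the mtrr
call sites (the first is what the hunks below actually do; the second is the
flush_tlb_all() conversion mentioned above and deliberately not taken):

	/* taken below: keep the raw flush, open-code the accounting */
	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* not taken: let the common helper do the accounting itself */
	flush_tlb_all();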
[akpm@linux-foundation.org: tweak comments]
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86'):

-rw-r--r--  arch/x86/include/asm/tlbflush.h     | 37
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c  |  2
-rw-r--r--  arch/x86/mm/tlb.c                   |  4

3 files changed, 34 insertions, 9 deletions
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index cf512003e663..e6d90babc245 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,6 +62,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 }
 
@@ -84,14 +85,38 @@ static inline void __flush_tlb_one(unsigned long addr)
 
 #ifndef CONFIG_SMP
 
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
+/* "_up" is for UniProcessor.
+ *
+ * This is a helper for other header functions.  *Not* intended to be called
+ * directly.  All global TLB flushes need to either call this, or to bump the
+ * vm statistics themselves.
+ */
+static inline void __flush_tlb_up(void)
+{
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	__flush_tlb();
+}
+
+static inline void flush_tlb_all(void)
+{
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	__flush_tlb_all();
+}
+
+static inline void flush_tlb(void)
+{
+	__flush_tlb_up();
+}
+
+static inline void local_flush_tlb(void)
+{
+	__flush_tlb_up();
+}
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -105,14 +130,14 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void flush_tlb_mm_range(struct mm_struct *mm,
 	   unsigned long start, unsigned long end, unsigned long vmflag)
 {
 	if (mm == current->active_mm)
-		__flush_tlb();
+		__flush_tlb_up();
 }
 
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index d4cdfa67509e..ce2d0a2c3e4f 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,6 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 	}
 
 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Save MTRR state */
@@ -696,6 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 static void post_set(void) __releases(set_atomicity_lock)
 {
 	/* Flush TLBs (no need to flush caches - they are disabled) */
+	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
 	__flush_tlb();
 
 	/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index f030cbe669a5..ae699b3bbac8 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -280,10 +280,8 @@ static void do_kernel_range_flush(void *info)
 	unsigned long addr;
 
 	/* flush range by one by one 'invlpg' */
-	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ONE_KERNEL);
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
 		__flush_tlb_single(addr);
-	}
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)