author     Jeremy Fitzhardinge <jeremy@goop.org>   2008-08-20 20:02:19 -0400
committer  Ingo Molnar <mingo@elte.hu>             2008-08-21 07:52:58 -0400
commit     994025caba3e6beade9bde84dd1b70d9d250f27b
tree       0fe4d6f08d252af1db6a631182725085bb76442b
parent     168d2f464ab9860f0d1e66cf1f9684973222f1c6
xen: add debugfs support
Add support for exporting statistics on mmu updates, multicall
batching and pv spinlocks into debugfs.  The base path is xen/ and
each subsystem adds its own directory: mmu, multicalls, spinlocks.

In each directory, writing 1 to "zero_stats" will cause the
corresponding stats to be zeroed the next time they're updated.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
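As a usage illustration (not part of the patch): with CONFIG_XEN_DEBUG_FS
enabled and debugfs mounted in the conventional place, the counters show up
as plain decimal files under /sys/kernel/debug/xen/mmu/. A minimal userspace
sketch, assuming only that standard mount point:

/* Illustrative only: read one counter, then arm the deferred reset.
 * Assumes debugfs is mounted at /sys/kernel/debug and the kernel was
 * built with CONFIG_XEN_DEBUG_FS; file names come from the patch below. */
#include <stdio.h>

int main(void)
{
        unsigned int n;
        FILE *f = fopen("/sys/kernel/debug/xen/mmu/pte_update", "r");

        if (f && fscanf(f, "%u", &n) == 1)
                printf("pte updates so far: %u\n", n);
        if (f)
                fclose(f);

        /* Writing 1 to zero_stats does not clear anything immediately;
         * the stats are zeroed the next time one of them is updated. */
        f = fopen("/sys/kernel/debug/xen/mmu/zero_stats", "w");
        if (f) {
                fputs("1\n", f);
                fclose(f);
        }
        return 0;
}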
Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 163 ++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 161 insertions(+), 2 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d9a35a363095..f5af913fd7b0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -40,6 +40,7 @@
  */
 #include <linux/sched.h>
 #include <linux/highmem.h>
+#include <linux/debugfs.h>
 #include <linux/bug.h>
 
 #include <asm/pgtable.h>
@@ -57,6 +58,61 @@
 
 #include "multicalls.h"
 #include "mmu.h"
+#include "debugfs.h"
+
+#define MMU_UPDATE_HISTO        30
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct {
+        u32 pgd_update;
+        u32 pgd_update_pinned;
+        u32 pgd_update_batched;
+
+        u32 pud_update;
+        u32 pud_update_pinned;
+        u32 pud_update_batched;
+
+        u32 pmd_update;
+        u32 pmd_update_pinned;
+        u32 pmd_update_batched;
+
+        u32 pte_update;
+        u32 pte_update_pinned;
+        u32 pte_update_batched;
+
+        u32 mmu_update;
+        u32 mmu_update_extended;
+        u32 mmu_update_histo[MMU_UPDATE_HISTO];
+
+        u32 prot_commit;
+        u32 prot_commit_batched;
+
+        u32 set_pte_at;
+        u32 set_pte_at_batched;
+        u32 set_pte_at_pinned;
+        u32 set_pte_at_current;
+        u32 set_pte_at_kernel;
+} mmu_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+        if (unlikely(zero_stats)) {
+                memset(&mmu_stats, 0, sizeof(mmu_stats));
+                zero_stats = 0;
+        }
+}
+
+#define ADD_STATS(elem, val)                    \
+        do { check_zero(); mmu_stats.elem += (val); } while(0)
+
+#else  /* !CONFIG_XEN_DEBUG_FS */
+
+#define ADD_STATS(elem, val) do { (void)(val); } while(0)
+
+#endif /* CONFIG_XEN_DEBUG_FS */
 
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
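Worth spelling out from the hunk above: writing 1 to zero_stats does not wipe
the counters on the spot. check_zero() runs at the head of every ADD_STATS(),
so the wipe is deferred to the next stat update. A self-contained userspace
model of the same pattern (names shortened, unlikely() dropped):

#include <stdio.h>
#include <string.h>

static struct { unsigned int pte_update; } stats;
static unsigned char zero_stats;

static void check_zero(void)
{
        if (zero_stats) {
                memset(&stats, 0, sizeof(stats));
                zero_stats = 0;
        }
}

#define ADD_STATS(elem, val) \
        do { check_zero(); stats.elem += (val); } while (0)

int main(void)
{
        ADD_STATS(pte_update, 1);
        ADD_STATS(pte_update, 1);
        zero_stats = 1;                         /* arm the reset */
        printf("%u\n", stats.pte_update);       /* still 2: no update yet */
        ADD_STATS(pte_update, 1);               /* reset fires, then +1 */
        printf("%u\n", stats.pte_update);       /* 1 */
        return 0;
}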
@@ -243,11 +299,21 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 
 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 
-	if (mcs.mc != NULL)
+	if (mcs.mc != NULL) {
+		ADD_STATS(mmu_update_extended, 1);
+		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
+
 		mcs.mc->args[1]++;
-	else {
+
+		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
+			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
+		else
+			ADD_STATS(mmu_update_histo[0], 1);
+	} else {
+		ADD_STATS(mmu_update, 1);
 		mcs = __xen_mc_entry(sizeof(*u));
 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+		ADD_STATS(mmu_update_histo[1], 1);
 	}
 
 	u = mcs.args;
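A gloss on the histogram bookkeeping just above: a pending multicall batching
n mmu_update entries is accounted in mmu_update_histo[n], so extending it
moves the batch from bucket n to bucket n+1 (one decrement, one increment);
batches that grow to MMU_UPDATE_HISTO or beyond are lumped into bucket 0, and
a freshly created single-entry batch seeds bucket 1. A simplified standalone
model (a bounds check on the decrement is added here so the demo stays in
range):

#include <stdio.h>

#define MMU_UPDATE_HISTO 30

static unsigned int histo[MMU_UPDATE_HISTO];

/* Move a batch from bucket len to bucket len+1, as in
 * xen_extend_mmu_update(); oversized batches share bucket 0. */
static void extend_batch(unsigned long *len)
{
        if (*len < MMU_UPDATE_HISTO)
                histo[*len]--;
        else
                histo[0]--;
        (*len)++;
        if (*len < MMU_UPDATE_HISTO)
                histo[*len]++;
        else
                histo[0]++;
}

int main(void)
{
        unsigned long len = 1;

        histo[1]++;                     /* new single-entry batch */
        extend_batch(&len);
        extend_batch(&len);
        printf("bucket 3 holds %u batch(es)\n", histo[3]);      /* 1 */
        return 0;
}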
@@ -267,6 +333,8 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	u.val = pmd_val_ma(val);
 	xen_extend_mmu_update(&u);
 
+	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 	preempt_enable();
@@ -274,6 +342,8 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
274 342
275void xen_set_pmd(pmd_t *ptr, pmd_t val) 343void xen_set_pmd(pmd_t *ptr, pmd_t val)
276{ 344{
345 ADD_STATS(pmd_update, 1);
346
277 /* If page is not pinned, we can just update the entry 347 /* If page is not pinned, we can just update the entry
278 directly */ 348 directly */
279 if (!xen_page_pinned(ptr)) { 349 if (!xen_page_pinned(ptr)) {
@@ -281,6 +351,8 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
281 return; 351 return;
282 } 352 }
283 353
354 ADD_STATS(pmd_update_pinned, 1);
355
284 xen_set_pmd_hyper(ptr, val); 356 xen_set_pmd_hyper(ptr, val);
285} 357}
286 358
@@ -300,12 +372,18 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (mm == &init_mm)
 		preempt_disable();
 
+	ADD_STATS(set_pte_at, 1);
+//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
+	ADD_STATS(set_pte_at_current, mm == current->mm);
+	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+
 	if (mm == current->mm || mm == &init_mm) {
 		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
 			struct multicall_space mcs;
 			mcs = xen_mc_entry(0);
 
 			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+			ADD_STATS(set_pte_at_batched, 1);
 			xen_mc_issue(PARAVIRT_LAZY_MMU);
 			goto out;
 		} else
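Note the idiom in the set_pte_at hunk: ADD_STATS() is passed a comparison as
its value, and since C comparisons evaluate to exactly 0 or 1, the counter
ends up recording how often the condition held. In miniature (stand-in values
invented for illustration):

#include <stdio.h>

int main(void)
{
        unsigned int kernel_hits = 0;
        int callers[] = { 1, 2, 1, 3 };  /* stand-ins for mm pointers */
        int init_mm = 1;                 /* stand-in for &init_mm */

        for (int i = 0; i < 4; i++)
                kernel_hits += (callers[i] == init_mm);  /* adds 0 or 1 */

        printf("%u\n", kernel_hits);     /* 2 */
        return 0;
}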
@@ -336,6 +414,9 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
+	ADD_STATS(prot_commit, 1);
+	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -402,6 +483,8 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	u.val = pud_val_ma(val);
 	xen_extend_mmu_update(&u);
 
+	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 	preempt_enable();
@@ -409,6 +492,8 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
+	ADD_STATS(pud_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -416,11 +501,17 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 		return;
 	}
 
+	ADD_STATS(pud_update_pinned, 1);
+
 	xen_set_pud_hyper(ptr, val);
 }
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+	ADD_STATS(pte_update, 1);
+//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
+	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 #ifdef CONFIG_X86_PAE
 	ptep->pte_high = pte.pte_high;
 	smp_wmb();
@@ -517,6 +608,8 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+	ADD_STATS(pgd_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -528,6 +621,9 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
 		return;
 	}
 
+	ADD_STATS(pgd_update_pinned, 1);
+	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	/* If it's pinned, then we can at least batch the kernel and
 	   user updates together. */
 	xen_mc_batch();
@@ -1003,3 +1099,66 @@ void xen_exit_mmap(struct mm_struct *mm)
 
 	spin_unlock(&mm->page_table_lock);
 }
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_mmu_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
+
+	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
+	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pgd_update_pinned);
+	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pgd_update_batched);
+
+	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
+	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pud_update_pinned);
+	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pud_update_batched);
+
+	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
+	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pmd_update_pinned);
+	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pmd_update_batched);
+
+	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
+//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
+//			   &mmu_stats.pte_update_pinned);
+	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pte_update_batched);
+
+	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
+	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
+			   &mmu_stats.mmu_update_extended);
+	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
+				     mmu_stats.mmu_update_histo, 20);
+
+	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
+	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_batched);
+	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_current);
+	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_kernel);
+
+	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
+	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
+			   &mmu_stats.prot_commit_batched);
+
+	return 0;
+}
+fs_initcall(xen_mmu_debugfs);
+
+#endif /* CONFIG_XEN_DEBUG_FS */
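The helpers xen_init_debugfs() and xen_debugfs_create_u32_array() live in
arch/x86/xen/debugfs.c, added by this same commit (the diffstat above is
limited to mmu.c, so they do not appear here). A hedged sketch of the shape
of the directory helper, not the verbatim implementation:

/* Sketch only: memoize the top-level "xen" debugfs directory so each
 * subsystem (mmu, multicalls, spinlocks) can hang its own dir off it. */
static struct dentry *d_xen_debug;

struct dentry * __init xen_init_debugfs(void)
{
        if (!d_xen_debug) {
                d_xen_debug = debugfs_create_dir("xen", NULL);
                if (!d_xen_debug)
                        printk(KERN_WARNING
                               "Could not create 'xen' debugfs directory\n");
        }
        return d_xen_debug;
}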