author    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2010-12-16 18:50:17 -0500
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2011-05-20 17:26:39 -0400
commit    c86d8077b3ec048e42e26372b02dae26b38b0d6b (patch)
tree      ab625231e049a0011297ed19aac90ac95e2f2881 /arch/x86
parent    d5108316b894a172f891795dbad4975ab7ed7a41 (diff)
xen/mmu: remove all ad-hoc stats stuff
To make way for tracing.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
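The tracing referred to here is tracepoint-based instrumentation. As a rough sketch of the direction only (the event name, fields, and output format below are illustrative assumptions, not part of this patch), a TRACE_EVENT can take the place of a counter such as ADD_STATS(pmd_update, 1):

/*
 * Sketch only: an assumed tracepoint shape for a xen-mmu event standing
 * in for the removed ADD_STATS counters.  Name and fields are
 * illustrative, not taken from this patch.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xen

#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XEN_H

#include <linux/tracepoint.h>
#include <asm/pgtable_types.h>	/* pmd_t, pmdval_t */

TRACE_EVENT(xen_mmu_set_pmd,
	    TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
	    TP_ARGS(pmdp, pmdval),
	    TP_STRUCT__entry(
		    __field(pmd_t *, pmdp)
		    __field(pmdval_t, pmdval)
		    ),
	    /* record the raw value; decode it when the log is read */
	    TP_fast_assign(__entry->pmdp = pmdp;
			   __entry->pmdval = pmdval.pmd),
	    TP_printk("pmdp %p pmdval %016llx",
		      __entry->pmdp, (unsigned long long)__entry->pmdval)
	);

#endif /* _TRACE_XEN_H */

/* This part must be outside the multi-read guard */
#include <trace/define_trace.h>

The call site then shrinks to a single trace_xen_mmu_set_pmd(ptr, val), which costs next to nothing while the event is disabled, unlike the always-on debugfs counters removed below.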
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/xen/mmu.c | 138 ----------------------------------------
1 file changed, 0 insertions(+), 138 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 1a41e9257076..eb6d83a458c9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -75,61 +75,12 @@
 #include "mmu.h"
 #include "debugfs.h"
 
-#define MMU_UPDATE_HISTO	30
-
 /*
  * Protects atomic reservation decrease/increase against concurrent increases.
  * Also protects non-atomic updates of current_pages and balloon lists.
  */
 DEFINE_SPINLOCK(xen_reservation_lock);
 
-#ifdef CONFIG_XEN_DEBUG_FS
-
-static struct {
-	u32 pgd_update;
-	u32 pgd_update_pinned;
-	u32 pgd_update_batched;
-
-	u32 pud_update;
-	u32 pud_update_pinned;
-	u32 pud_update_batched;
-
-	u32 pmd_update;
-	u32 pmd_update_pinned;
-	u32 pmd_update_batched;
-
-	u32 pte_update;
-	u32 pte_update_pinned;
-	u32 pte_update_batched;
-
-	u32 mmu_update;
-	u32 mmu_update_extended;
-	u32 mmu_update_histo[MMU_UPDATE_HISTO];
-
-	u32 prot_commit;
-	u32 prot_commit_batched;
-} mmu_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-	if (unlikely(zero_stats)) {
-		memset(&mmu_stats, 0, sizeof(mmu_stats));
-		zero_stats = 0;
-	}
-}
-
-#define ADD_STATS(elem, val)			\
-	do { check_zero(); mmu_stats.elem += (val); } while(0)
-
-#else  /* !CONFIG_XEN_DEBUG_FS */
-
-#define ADD_STATS(elem, val)	do { (void)(val); } while(0)
-
-#endif /* CONFIG_XEN_DEBUG_FS */
-
-
 /*
  * Identity map, in addition to plain kernel map.  This needs to be
  * large enough to allocate page table pages to allocate the rest.
@@ -263,20 +214,10 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 
 	if (mcs.mc != NULL) {
-		ADD_STATS(mmu_update_extended, 1);
-		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
-
 		mcs.mc->args[1]++;
-
-		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
-			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
-		else
-			ADD_STATS(mmu_update_histo[0], 1);
 	} else {
-		ADD_STATS(mmu_update, 1);
 		mcs = __xen_mc_entry(sizeof(*u));
 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
-		ADD_STATS(mmu_update_histo[1], 1);
 	}
 
 	u = mcs.args;
@@ -296,8 +237,6 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	u.val = pmd_val_ma(val);
 	xen_extend_mmu_update(&u);
 
-	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 	preempt_enable();
@@ -305,8 +244,6 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
-	ADD_STATS(pmd_update, 1);
-
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -314,8 +251,6 @@ static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 		return;
 	}
 
-	ADD_STATS(pmd_update_pinned, 1);
-
 	xen_set_pmd_hyper(ptr, val);
 }
 
@@ -348,9 +283,6 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 
 static void xen_set_pte(pte_t *ptep, pte_t pteval)
 {
-	ADD_STATS(pte_update, 1);
-//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
-
 	if (!xen_batched_set_pte(ptep, pteval))
 		native_set_pte(ptep, pteval);
 }
@@ -379,9 +311,6 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
-	ADD_STATS(prot_commit, 1);
-	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -593,8 +522,6 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	u.val = pud_val_ma(val);
 	xen_extend_mmu_update(&u);
 
-	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 	preempt_enable();
@@ -602,8 +529,6 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 static void xen_set_pud(pud_t *ptr, pud_t val)
 {
-	ADD_STATS(pud_update, 1);
-
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -611,8 +536,6 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 		return;
 	}
 
-	ADD_STATS(pud_update_pinned, 1);
-
 	xen_set_pud_hyper(ptr, val);
 }
 
@@ -705,8 +628,6 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
-	ADD_STATS(pgd_update, 1);
-
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -718,9 +639,6 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 		return;
 	}
 
-	ADD_STATS(pgd_update_pinned, 1);
-	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
 	/* If it's pinned, then we can at least batch the kernel and
 	   user updates together. */
 	xen_mc_batch();
@@ -2384,8 +2302,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
-#ifdef CONFIG_XEN_DEBUG_FS
-
 static int p2m_dump_open(struct inode *inode, struct file *filp)
 {
 	return single_open(filp, p2m_dump_show, NULL);
@@ -2397,57 +2313,3 @@ static const struct file_operations p2m_dump_fops = {
 	.llseek = seq_lseek,
 	.release = single_release,
 };
-
-static struct dentry *d_mmu_debug;
-
-static int __init xen_mmu_debugfs(void)
-{
-	struct dentry *d_xen = xen_init_debugfs();
-
-	if (d_xen == NULL)
-		return -ENOMEM;
-
-	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
-
-	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
-
-	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
-	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
-			   &mmu_stats.pgd_update_pinned);
-	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
-			   &mmu_stats.pgd_update_pinned);
-
-	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
-	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
-			   &mmu_stats.pud_update_pinned);
-	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
-			   &mmu_stats.pud_update_pinned);
-
-	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
-	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
-			   &mmu_stats.pmd_update_pinned);
-	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
-			   &mmu_stats.pmd_update_pinned);
-
-	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
-//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
-//			   &mmu_stats.pte_update_pinned);
-	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
-			   &mmu_stats.pte_update_pinned);
-
-	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
-	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
-			   &mmu_stats.mmu_update_extended);
-	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
-				     mmu_stats.mmu_update_histo, 20);
-
-	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
-	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
-			   &mmu_stats.prot_commit_batched);
-
-	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
-	return 0;
-}
-fs_initcall(xen_mmu_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
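With the counters gone, a converted call site would look roughly as follows. This is a sketch, not part of the patch: the tracepoint name is assumed (matching the sketch above), and the unpinned fast path (*ptr = val) is filled in from context around the hunks above.

/* Sketch: xen_set_pmd() once a tracepoint replaces the counters. */
static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);	/* was ADD_STATS(pmd_update, 1) */

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	/* was also ADD_STATS(pmd_update_pinned, 1) */
	xen_set_pmd_hyper(ptr, val);
}

The same one-line substitution applies at each ADD_STATS site removed above; the pinned/batched distinctions the counters tracked can instead be reconstructed offline from the recorded event stream.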