-rw-r--r--  arch/x86/xen/Kconfig        10
-rw-r--r--  arch/x86/xen/Makefile        3
-rw-r--r--  arch/x86/xen/debugfs.c     123
-rw-r--r--  arch/x86/xen/debugfs.h      10
-rw-r--r--  arch/x86/xen/mmu.c         163
-rw-r--r--  arch/x86/xen/multicalls.c  115
-rw-r--r--  arch/x86/xen/spinlock.c    165
7 files changed, 580 insertions(+), 9 deletions(-)
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 3815e425f470..d3e68465ace9 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -27,4 +27,12 @@ config XEN_MAX_DOMAIN_MEMORY
 config XEN_SAVE_RESTORE
 	bool
 	depends on PM
-	default y
\ No newline at end of file
+	default y
+
+config XEN_DEBUG_FS
+	bool "Enable Xen debug and tuning parameters in debugfs"
+	depends on XEN && DEBUG_FS
+	default n
+	help
+	  Enable statistics output and various tuning options in debugfs.
+	  Enabling this option may incur a significant performance overhead.
\ No newline at end of file
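
The new option is hidden unless generic debugfs support is already configured in. As a sketch, a minimal .config fragment enabling the statistics on a Xen-capable kernel would look like:

    CONFIG_DEBUG_FS=y
    CONFIG_XEN_DEBUG_FS=y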
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 9ee745fa5527..313947940a1a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -8,4 +8,5 @@ endif
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)		+= smp.o spinlock.o
+obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
\ No newline at end of file
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
new file mode 100644
index 000000000000..b53225d2cac3
--- /dev/null
+++ b/arch/x86/xen/debugfs.c
@@ -0,0 +1,123 @@
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "debugfs.h"
+
+static struct dentry *d_xen_debug;
+
+struct dentry * __init xen_init_debugfs(void)
+{
+	if (!d_xen_debug) {
+		d_xen_debug = debugfs_create_dir("xen", NULL);
+
+		if (!d_xen_debug)
+			pr_warning("Could not create 'xen' debugfs directory\n");
+	}
+
+	return d_xen_debug;
+}
+
+struct array_data
+{
+	void *array;
+	unsigned elements;
+};
+
+static int u32_array_open(struct inode *inode, struct file *file)
+{
+	file->private_data = NULL;
+	return nonseekable_open(inode, file);
+}
+
+static size_t format_array(char *buf, size_t bufsize, const char *fmt,
+			   u32 *array, unsigned array_size)
+{
+	size_t ret = 0;
+	unsigned i;
+
+	for(i = 0; i < array_size; i++) {
+		size_t len;
+
+		len = snprintf(buf, bufsize, fmt, array[i]);
+		len++;	/* ' ' or '\n' */
+		ret += len;
+
+		if (buf) {
+			buf += len;
+			bufsize -= len;
+			buf[-1] = (i == array_size-1) ? '\n' : ' ';
+		}
+	}
+
+	ret++;		/* \0 */
+	if (buf)
+		*buf = '\0';
+
+	return ret;
+}
+
+static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
+{
+	size_t len = format_array(NULL, 0, fmt, array, array_size);
+	char *ret;
+
+	ret = kmalloc(len, GFP_KERNEL);
+	if (ret == NULL)
+		return NULL;
+
+	format_array(ret, len, fmt, array, array_size);
+	return ret;
+}
+
+static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
+			      loff_t *ppos)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct array_data *data = inode->i_private;
+	size_t size;
+
+	if (*ppos == 0) {
+		if (file->private_data) {
+			kfree(file->private_data);
+			file->private_data = NULL;
+		}
+
+		file->private_data = format_array_alloc("%u", data->array, data->elements);
+	}
+
+	size = 0;
+	if (file->private_data)
+		size = strlen(file->private_data);
+
+	return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
+}
+
+static int xen_array_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static struct file_operations u32_array_fops = {
+	.owner	= THIS_MODULE,
+	.open	= u32_array_open,
+	.release= xen_array_release,
+	.read	= u32_array_read,
+};
+
+struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+					    struct dentry *parent,
+					    u32 *array, unsigned elements)
+{
+	struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+	if (data == NULL)
+		return NULL;
+
+	data->array = array;
+	data->elements = elements;
+
+	return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
+}
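
xen_init_debugfs() creates the shared "xen" debugfs directory on first use, and xen_debugfs_create_u32_array() exposes a u32 array as a single whitespace-separated line, sized via a dry-run pass of format_array(). A hypothetical caller, following the same pattern the mmu.c and multicalls.c hunks below use (my_histo and my_debugfs are illustrative names, not part of this patch):

    static u32 my_histo[16];

    static int __init my_debugfs(void)
    {
            struct dentry *d_xen = xen_init_debugfs();

            if (d_xen == NULL)
                    return -ENOMEM;

            xen_debugfs_create_u32_array("my_histo", 0444, d_xen,
                                         my_histo, ARRAY_SIZE(my_histo));
            return 0;
    }
    fs_initcall(my_debugfs);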
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
new file mode 100644
index 000000000000..e28132084832
--- /dev/null
+++ b/arch/x86/xen/debugfs.h
@@ -0,0 +1,10 @@
+#ifndef _XEN_DEBUGFS_H
+#define _XEN_DEBUGFS_H
+
+struct dentry * __init xen_init_debugfs(void);
+
+struct dentry *xen_debugfs_create_u32_array(const char *name, mode_t mode,
+					    struct dentry *parent,
+					    u32 *array, unsigned elements);
+
+#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d9a35a363095..f5af913fd7b0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -40,6 +40,7 @@
  */
 #include <linux/sched.h>
 #include <linux/highmem.h>
+#include <linux/debugfs.h>
 #include <linux/bug.h>
 
 #include <asm/pgtable.h>
@@ -57,6 +58,61 @@
 
 #include "multicalls.h"
 #include "mmu.h"
+#include "debugfs.h"
+
+#define MMU_UPDATE_HISTO	30
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct {
+	u32 pgd_update;
+	u32 pgd_update_pinned;
+	u32 pgd_update_batched;
+
+	u32 pud_update;
+	u32 pud_update_pinned;
+	u32 pud_update_batched;
+
+	u32 pmd_update;
+	u32 pmd_update_pinned;
+	u32 pmd_update_batched;
+
+	u32 pte_update;
+	u32 pte_update_pinned;
+	u32 pte_update_batched;
+
+	u32 mmu_update;
+	u32 mmu_update_extended;
+	u32 mmu_update_histo[MMU_UPDATE_HISTO];
+
+	u32 prot_commit;
+	u32 prot_commit_batched;
+
+	u32 set_pte_at;
+	u32 set_pte_at_batched;
+	u32 set_pte_at_pinned;
+	u32 set_pte_at_current;
+	u32 set_pte_at_kernel;
+} mmu_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+	if (unlikely(zero_stats)) {
+		memset(&mmu_stats, 0, sizeof(mmu_stats));
+		zero_stats = 0;
+	}
+}
+
+#define ADD_STATS(elem, val)			\
+	do { check_zero(); mmu_stats.elem += (val); } while(0)
+
+#else  /* !CONFIG_XEN_DEBUG_FS */
+
+#define ADD_STATS(elem, val)	do { (void)(val); } while(0)
+
+#endif /* CONFIG_XEN_DEBUG_FS */
 
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
@@ -243,11 +299,21 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 
 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 
-	if (mcs.mc != NULL)
+	if (mcs.mc != NULL) {
+		ADD_STATS(mmu_update_extended, 1);
+		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
+
 		mcs.mc->args[1]++;
-	else {
+
+		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
+			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
+		else
+			ADD_STATS(mmu_update_histo[0], 1);
+	} else {
+		ADD_STATS(mmu_update, 1);
 		mcs = __xen_mc_entry(sizeof(*u));
 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+		ADD_STATS(mmu_update_histo[1], 1);
 	}
 
 	u = mcs.args;
@@ -267,6 +333,8 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 	u.val = pmd_val_ma(val);
 	xen_extend_mmu_update(&u);
 
+	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 	preempt_enable();
@@ -274,6 +342,8 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
+	ADD_STATS(pmd_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -281,6 +351,8 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
 		return;
 	}
 
+	ADD_STATS(pmd_update_pinned, 1);
+
 	xen_set_pmd_hyper(ptr, val);
 }
 
@@ -300,12 +372,18 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (mm == &init_mm)
 		preempt_disable();
 
+	ADD_STATS(set_pte_at, 1);
+//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
+	ADD_STATS(set_pte_at_current, mm == current->mm);
+	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+
 	if (mm == current->mm || mm == &init_mm) {
 		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
 			struct multicall_space mcs;
 			mcs = xen_mc_entry(0);
 
 			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+			ADD_STATS(set_pte_at_batched, 1);
 			xen_mc_issue(PARAVIRT_LAZY_MMU);
 			goto out;
 		} else
@@ -336,6 +414,9 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 	u.val = pte_val_ma(pte);
 	xen_extend_mmu_update(&u);
 
+	ADD_STATS(prot_commit, 1);
+	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -402,6 +483,8 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 	u.val = pud_val_ma(val);
 	xen_extend_mmu_update(&u);
 
+	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 
 	preempt_enable();
@@ -409,6 +492,8 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
+	ADD_STATS(pud_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -416,11 +501,17 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 		return;
 	}
 
+	ADD_STATS(pud_update_pinned, 1);
+
 	xen_set_pud_hyper(ptr, val);
 }
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+	ADD_STATS(pte_update, 1);
+//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
+	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 #ifdef CONFIG_X86_PAE
 	ptep->pte_high = pte.pte_high;
 	smp_wmb();
@@ -517,6 +608,8 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+	ADD_STATS(pgd_update, 1);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -528,6 +621,9 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
 		return;
 	}
 
+	ADD_STATS(pgd_update_pinned, 1);
+	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
+
 	/* If it's pinned, then we can at least batch the kernel and
 	   user updates together. */
 	xen_mc_batch();
@@ -1003,3 +1099,66 @@ void xen_exit_mmap(struct mm_struct *mm)
 
 	spin_unlock(&mm->page_table_lock);
 }
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_mmu_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
+
+	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
+	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pgd_update_pinned);
+	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pgd_update_batched);
+
+	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
+	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pud_update_pinned);
+	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pud_update_batched);
+
+	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
+	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
+			   &mmu_stats.pmd_update_pinned);
+	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pmd_update_batched);
+
+	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
+//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
+//			   &mmu_stats.pte_update_pinned);
+	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
+			   &mmu_stats.pte_update_batched);
+
+	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
+	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
+			   &mmu_stats.mmu_update_extended);
+	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
+				     mmu_stats.mmu_update_histo, MMU_UPDATE_HISTO);
+
+	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
+	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_batched);
+	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_current);
+	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
+			   &mmu_stats.set_pte_at_kernel);
+
+	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
+	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
+			   &mmu_stats.prot_commit_batched);
+
+	return 0;
+}
+fs_initcall(xen_mmu_debugfs);
+
+#endif /* CONFIG_XEN_DEBUG_FS */
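
For reference, with CONFIG_XEN_DEBUG_FS=y an invocation such as ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) expands along the lines of:

    do {
            check_zero();
            mmu_stats.pmd_update_batched +=
                    (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
    } while (0);

so a boolean second argument contributes 0 or 1, and the counter only advances when the update really was batched; with the option off, the macro still evaluates (and discards) its argument.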
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 9efd1c6c9776..8ea8a0d0b0de 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -21,16 +21,20 @@
  */
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/debugfs.h>
 
 #include <asm/xen/hypercall.h>
 
 #include "multicalls.h"
+#include "debugfs.h"
+
+#define MC_BATCH	32
 
 #define MC_DEBUG	1
 
-#define MC_BATCH	32
 #define MC_ARGS		(MC_BATCH * 16)
 
+
 struct mc_buffer {
 	struct multicall_entry entries[MC_BATCH];
 #if MC_DEBUG
@@ -47,6 +51,76 @@ struct mc_buffer {
 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
 DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
 
+/* flush reasons 0- slots, 1- args, 2- callbacks */
+enum flush_reasons
+{
+	FL_SLOTS,
+	FL_ARGS,
+	FL_CALLBACKS,
+
+	FL_N_REASONS
+};
+
+#ifdef CONFIG_XEN_DEBUG_FS
+#define NHYPERCALLS	40		/* not really */
+
+static struct {
+	unsigned histo[MC_BATCH+1];
+
+	unsigned issued;
+	unsigned arg_total;
+	unsigned hypercalls;
+	unsigned histo_hypercalls[NHYPERCALLS];
+
+	unsigned flush[FL_N_REASONS];
+} mc_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+	if (unlikely(zero_stats)) {
+		memset(&mc_stats, 0, sizeof(mc_stats));
+		zero_stats = 0;
+	}
+}
+
+static void mc_add_stats(const struct mc_buffer *mc)
+{
+	int i;
+
+	check_zero();
+
+	mc_stats.issued++;
+	mc_stats.hypercalls += mc->mcidx;
+	mc_stats.arg_total += mc->argidx;
+
+	mc_stats.histo[mc->mcidx]++;
+	for(i = 0; i < mc->mcidx; i++) {
+		unsigned op = mc->entries[i].op;
+		if (op < NHYPERCALLS)
+			mc_stats.histo_hypercalls[op]++;
+	}
+}
+
+static void mc_stats_flush(enum flush_reasons idx)
+{
+	check_zero();
+
+	mc_stats.flush[idx]++;
+}
+
+#else  /* !CONFIG_XEN_DEBUG_FS */
+
+static inline void mc_add_stats(const struct mc_buffer *mc)
+{
+}
+
+static inline void mc_stats_flush(enum flush_reasons idx)
+{
+}
+#endif	/* CONFIG_XEN_DEBUG_FS */
+
 void xen_mc_flush(void)
 {
 	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
@@ -60,6 +134,8 @@ void xen_mc_flush(void)
 	   something in the middle */
 	local_irq_save(flags);
 
+	mc_add_stats(b);
+
 	if (b->mcidx) {
 #if MC_DEBUG
 		memcpy(b->debug, b->entries,
@@ -115,6 +191,7 @@ struct multicall_space __xen_mc_entry(size_t args)
 
 	if (b->mcidx == MC_BATCH ||
 	    (argidx + args) > MC_ARGS) {
+		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
 		xen_mc_flush();
 		argidx = roundup(b->argidx, sizeof(u64));
 	}
@@ -158,10 +235,44 @@ void xen_mc_callback(void (*fn)(void *), void *data)
 	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
 	struct callback *cb;
 
-	if (b->cbidx == MC_BATCH)
+	if (b->cbidx == MC_BATCH) {
+		mc_stats_flush(FL_CALLBACKS);
 		xen_mc_flush();
+	}
 
 	cb = &b->callbacks[b->cbidx++];
 	cb->fn = fn;
 	cb->data = data;
 }
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_mc_debug;
+
+static int __init xen_mc_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_mc_debug = debugfs_create_dir("multicalls", d_xen);
+
+	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);
+
+	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
+	debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
+	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);
+
+	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
+				     mc_stats.histo, MC_BATCH + 1);
+	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
+				     mc_stats.histo_hypercalls, NHYPERCALLS);
+	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
+				     mc_stats.flush, FL_N_REASONS);
+
+	return 0;
+}
+fs_initcall(xen_mc_debugfs);
+
+#endif	/* CONFIG_XEN_DEBUG_FS */
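
"batches" counts xen_mc_flush() invocations and "hypercalls" counts the multicall entries they carried, so their ratio is the average batch size. A userspace sketch that computes it, assuming debugfs is mounted at /sys/kernel/debug (read_u32 is an illustrative helper, not part of the patch):

    #include <stdio.h>

    static unsigned read_u32(const char *path)
    {
            FILE *f = fopen(path, "r");
            unsigned val = 0;

            if (f) {
                    if (fscanf(f, "%u", &val) != 1)
                            val = 0;
                    fclose(f);
            }
            return val;
    }

    int main(void)
    {
            unsigned batches = read_u32("/sys/kernel/debug/xen/multicalls/batches");
            unsigned hcalls = read_u32("/sys/kernel/debug/xen/multicalls/hypercalls");

            if (batches)
                    printf("avg %.2f hypercalls per batch\n",
                           (double)hcalls / batches);
            return 0;
    }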
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 4884bc603aa7..0d8f3b2d9bec 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -4,6 +4,8 @@
  */
 #include <linux/kernel_stat.h>
 #include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/log2.h>
 
 #include <asm/paravirt.h>
 
@@ -11,6 +13,93 @@
 #include <xen/events.h>
 
 #include "xen-ops.h"
+#include "debugfs.h"
+
+#ifdef CONFIG_XEN_DEBUG_FS
+static struct xen_spinlock_stats
+{
+	u64 taken;
+	u32 taken_slow;
+	u32 taken_slow_nested;
+	u32 taken_slow_pickup;
+	u32 taken_slow_spurious;
+
+	u64 released;
+	u32 released_slow;
+	u32 released_slow_kicked;
+
+#define HISTO_BUCKETS	20
+	u32 histo_spin_fast[HISTO_BUCKETS+1];
+	u32 histo_spin[HISTO_BUCKETS+1];
+
+	u64 spinning_time;
+	u64 total_time;
+} spinlock_stats;
+
+static u8 zero_stats;
+
+static unsigned lock_timeout = 1 << 10;
+#define TIMEOUT lock_timeout
+
+static inline void check_zero(void)
+{
+	if (unlikely(zero_stats)) {
+		memset(&spinlock_stats, 0, sizeof(spinlock_stats));
+		zero_stats = 0;
+	}
+}
+
+#define ADD_STATS(elem, val)			\
+	do { check_zero(); spinlock_stats.elem += (val); } while(0)
+
+static inline u64 spin_time_start(void)
+{
+	return xen_clocksource_read();
+}
+
+static void __spin_time_accum(u64 delta, u32 *array)
+{
+	unsigned index = ilog2(delta);
+
+	check_zero();
+
+	if (index < HISTO_BUCKETS)
+		array[index]++;
+	else
+		array[HISTO_BUCKETS]++;
+}
+
+static inline void spin_time_accum_fast(u64 start)
+{
+	u32 delta = xen_clocksource_read() - start;
+
+	__spin_time_accum(delta, spinlock_stats.histo_spin_fast);
+	spinlock_stats.spinning_time += delta;
+}
+
+static inline void spin_time_accum(u64 start)
+{
+	u32 delta = xen_clocksource_read() - start;
+
+	__spin_time_accum(delta, spinlock_stats.histo_spin);
+	spinlock_stats.total_time += delta;
+}
+#else  /* !CONFIG_XEN_DEBUG_FS */
+#define TIMEOUT			(1 << 10)
+#define ADD_STATS(elem, val)	do { (void)(val); } while(0)
+
+static inline u64 spin_time_start(void)
+{
+	return 0;
+}
+
+static inline void spin_time_accum_fast(u64 start)
+{
+}
+static inline void spin_time_accum(u64 start)
+{
+}
+#endif  /* CONFIG_XEN_DEBUG_FS */
 
 struct xen_spinlock {
 	unsigned char lock;		/* 0 -> free; 1 -> locked */
@@ -92,6 +181,9 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
+	ADD_STATS(taken_slow, 1);
+	ADD_STATS(taken_slow_nested, prev != NULL);
+
 	do {
 		/* clear pending */
 		xen_clear_irq_pending(irq);
@@ -100,6 +192,8 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 		   we weren't looking */
 		ret = xen_spin_trylock(lock);
 		if (ret) {
+			ADD_STATS(taken_slow_pickup, 1);
+
 			/*
 			 * If we interrupted another spinlock while it
 			 * was blocking, make sure it doesn't block
@@ -120,6 +214,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 		 * pending.
 		 */
 		xen_poll_irq(irq);
+		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
 	kstat_this_cpu.irqs[irq]++;
@@ -132,11 +227,18 @@ out:
 static void xen_spin_lock(struct raw_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int timeout;
+	unsigned timeout;
 	u8 oldval;
+	u64 start_spin;
+
+	ADD_STATS(taken, 1);
+
+	start_spin = spin_time_start();
 
 	do {
-		timeout = 1 << 10;
+		u64 start_spin_fast = spin_time_start();
+
+		timeout = TIMEOUT;
 
 		asm("1: xchgb %1,%0\n"
 		    "   testb %1,%1\n"
@@ -151,16 +253,22 @@ static void xen_spin_lock(struct raw_spinlock *lock)
 		    : "1" (1)
 		    : "memory");
 
-	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
+		spin_time_accum_fast(start_spin_fast);
+	} while (unlikely(oldval != 0 && (TIMEOUT == ~0 || !xen_spin_lock_slow(lock))));
+
+	spin_time_accum(start_spin);
 }
 
 static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 {
 	int cpu;
 
+	ADD_STATS(released_slow, 1);
+
 	for_each_online_cpu(cpu) {
 		/* XXX should mix up next cpu selection */
 		if (per_cpu(lock_spinners, cpu) == xl) {
+			ADD_STATS(released_slow_kicked, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
 			break;
 		}
@@ -171,6 +279,8 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 
+	ADD_STATS(released, 1);
+
 	smp_wmb();		/* make sure no writes get moved after unlock */
 	xl->lock = 0;		/* release lock */
 
@@ -216,3 +326,52 @@ void __init xen_init_spinlocks(void)
 	pv_lock_ops.spin_trylock = xen_spin_trylock;
 	pv_lock_ops.spin_unlock = xen_spin_unlock;
 }
+
+#ifdef CONFIG_XEN_DEBUG_FS
+
+static struct dentry *d_spin_debug;
+
+static int __init xen_spinlock_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);
+
+	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
+
+	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);
+
+	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
+	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow);
+	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_nested);
+	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_pickup);
+	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_spurious);
+
+	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
+	debugfs_create_u32("released_slow", 0444, d_spin_debug,
+			   &spinlock_stats.released_slow);
+	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
+			   &spinlock_stats.released_slow_kicked);
+
+	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
+			   &spinlock_stats.spinning_time);
+	debugfs_create_u64("time_total", 0444, d_spin_debug,
+			   &spinlock_stats.total_time);
+
+	xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+				     spinlock_stats.histo_spin, HISTO_BUCKETS + 1);
+	xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+				     spinlock_stats.histo_spin_fast, HISTO_BUCKETS + 1);
+
+	return 0;
+}
+fs_initcall(xen_spinlock_debugfs);
+
+#endif	/* CONFIG_XEN_DEBUG_FS */
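
The spin-time histograms bucket by ilog2(delta): bucket n collects samples with delta in [2^n, 2^(n+1)) in the units of xen_clocksource_read(), and anything at or beyond 2^HISTO_BUCKETS is clamped into the last slot. A standalone sketch of that bucketing (userspace; ilog2_u64 stands in for the kernel's ilog2()):

    #include <stdio.h>

    #define HISTO_BUCKETS 20

    /* floor(log2(v)), a stand-in for the kernel's ilog2() */
    static unsigned ilog2_u64(unsigned long long v)
    {
            unsigned r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned histo[HISTO_BUCKETS + 1] = { 0 };
            unsigned long long samples[] = { 3, 100, 5000, 1ULL << 40 };
            unsigned i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    unsigned idx = ilog2_u64(samples[i]);

                    histo[idx < HISTO_BUCKETS ? idx : HISTO_BUCKETS]++;
            }

            for (i = 0; i <= HISTO_BUCKETS; i++)
                    printf("bucket %2u: %u\n", i, histo[i]);
            return 0;
    }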