Diffstat (limited to 'arch/x86/xen/multicalls.c')
-rw-r--r--	arch/x86/xen/multicalls.c	169
1 file changed, 47 insertions, 122 deletions
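Note: the patch below replaces the file-local flush statistics with Xen tracepoints (trace_xen_mc_flush(), trace_xen_mc_flush_reason(), trace_xen_mc_entry_alloc(), trace_xen_mc_extend_args(), trace_xen_mc_callback()). For orientation only, here is a minimal sketch of the flush-reason values those tracepoints take; this is an assumption about the Xen trace header (the constants XEN_MC_FL_BATCH, XEN_MC_FL_ARGS and XEN_MC_FL_CALLBACK appear in the diff itself, XEN_MC_FL_NONE is added purely as an illustrative placeholder):

/* Sketch only -- assumed shape of the flush-reason values used by the
 * tracepoints in the patch; the authoritative definitions live in the
 * Xen trace header, not in this file. */
enum xen_mc_flush_reason {
	XEN_MC_FL_NONE,		/* illustrative placeholder */
	XEN_MC_FL_BATCH,	/* ran out of multicall slots (MC_BATCH) */
	XEN_MC_FL_ARGS,		/* ran out of argument space (MC_ARGS) */
	XEN_MC_FL_CALLBACK,	/* ran out of callback slots */
};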
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 1b2b73ff0a6..0d82003e76a 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -30,12 +30,13 @@
 
 #define MC_BATCH	32
 
-#define MC_DEBUG	1
+#define MC_DEBUG	0
 
 #define MC_ARGS		(MC_BATCH * 16)
 
 
 struct mc_buffer {
+	unsigned mcidx, argidx, cbidx;
 	struct multicall_entry entries[MC_BATCH];
 #if MC_DEBUG
 	struct multicall_entry debug[MC_BATCH];
@@ -46,85 +47,15 @@ struct mc_buffer {
 		void (*fn)(void *);
 		void *data;
 	} callbacks[MC_BATCH];
-	unsigned mcidx, argidx, cbidx;
 };
 
 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
 DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
 
-/* flush reasons 0- slots, 1- args, 2- callbacks */
-enum flush_reasons
-{
-	FL_SLOTS,
-	FL_ARGS,
-	FL_CALLBACKS,
-
-	FL_N_REASONS
-};
-
-#ifdef CONFIG_XEN_DEBUG_FS
-#define NHYPERCALLS	40		/* not really */
-
-static struct {
-	unsigned histo[MC_BATCH+1];
-
-	unsigned issued;
-	unsigned arg_total;
-	unsigned hypercalls;
-	unsigned histo_hypercalls[NHYPERCALLS];
-
-	unsigned flush[FL_N_REASONS];
-} mc_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-	if (unlikely(zero_stats)) {
-		memset(&mc_stats, 0, sizeof(mc_stats));
-		zero_stats = 0;
-	}
-}
-
-static void mc_add_stats(const struct mc_buffer *mc)
-{
-	int i;
-
-	check_zero();
-
-	mc_stats.issued++;
-	mc_stats.hypercalls += mc->mcidx;
-	mc_stats.arg_total += mc->argidx;
-
-	mc_stats.histo[mc->mcidx]++;
-	for(i = 0; i < mc->mcidx; i++) {
-		unsigned op = mc->entries[i].op;
-		if (op < NHYPERCALLS)
-			mc_stats.histo_hypercalls[op]++;
-	}
-}
-
-static void mc_stats_flush(enum flush_reasons idx)
-{
-	check_zero();
-
-	mc_stats.flush[idx]++;
-}
-
-#else  /* !CONFIG_XEN_DEBUG_FS */
-
-static inline void mc_add_stats(const struct mc_buffer *mc)
-{
-}
-
-static inline void mc_stats_flush(enum flush_reasons idx)
-{
-}
-#endif	/* CONFIG_XEN_DEBUG_FS */
-
 void xen_mc_flush(void)
 {
 	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+	struct multicall_entry *mc;
 	int ret = 0;
 	unsigned long flags;
 	int i;
@@ -135,9 +66,26 @@ void xen_mc_flush(void)
 	   something in the middle */
 	local_irq_save(flags);
 
-	mc_add_stats(b);
+	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
+
+	switch (b->mcidx) {
+	case 0:
+		/* no-op */
+		BUG_ON(b->argidx != 0);
+		break;
+
+	case 1:
+		/* Singleton multicall - bypass multicall machinery
+		   and just do the call directly. */
+		mc = &b->entries[0];
+
+		mc->result = privcmd_call(mc->op,
+					  mc->args[0], mc->args[1], mc->args[2],
+					  mc->args[3], mc->args[4]);
+		ret = mc->result < 0;
+		break;
 
-	if (b->mcidx) {
+	default:
 #if MC_DEBUG
 		memcpy(b->debug, b->entries,
 		       b->mcidx * sizeof(struct multicall_entry));
@@ -164,11 +112,10 @@ void xen_mc_flush(void)
 			}
 		}
 #endif
+	}
 
 	b->mcidx = 0;
 	b->argidx = 0;
-	} else
-		BUG_ON(b->argidx != 0);
 
 	for (i = 0; i < b->cbidx; i++) {
 		struct callback *cb = &b->callbacks[i];
@@ -188,18 +135,21 @@ struct multicall_space __xen_mc_entry(size_t args)
 	struct multicall_space ret;
 	unsigned argidx = roundup(b->argidx, sizeof(u64));
 
+	trace_xen_mc_entry_alloc(args);
+
 	BUG_ON(preemptible());
 	BUG_ON(b->argidx >= MC_ARGS);
 
-	if (b->mcidx == MC_BATCH ||
-	    (argidx + args) >= MC_ARGS) {
-		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
+	if (unlikely(b->mcidx == MC_BATCH ||
+		     (argidx + args) >= MC_ARGS)) {
+		trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
+					  XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
 		xen_mc_flush();
 		argidx = roundup(b->argidx, sizeof(u64));
 	}
 
 	ret.mc = &b->entries[b->mcidx];
-#ifdef MC_DEBUG
+#if MC_DEBUG
 	b->caller[b->mcidx] = __builtin_return_address(0);
 #endif
 	b->mcidx++;
@@ -218,20 +168,25 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
 	BUG_ON(preemptible());
 	BUG_ON(b->argidx >= MC_ARGS);
 
-	if (b->mcidx == 0)
-		return ret;
-
-	if (b->entries[b->mcidx - 1].op != op)
-		return ret;
+	if (unlikely(b->mcidx == 0 ||
+		     b->entries[b->mcidx - 1].op != op)) {
+		trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
+		goto out;
+	}
 
-	if ((b->argidx + size) >= MC_ARGS)
-		return ret;
+	if (unlikely((b->argidx + size) >= MC_ARGS)) {
+		trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
+		goto out;
+	}
 
 	ret.mc = &b->entries[b->mcidx - 1];
 	ret.args = &b->args[b->argidx];
 	b->argidx += size;
 
 	BUG_ON(b->argidx >= MC_ARGS);
+
+	trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
+out:
 	return ret;
 }
 
@@ -241,43 +196,13 @@ void xen_mc_callback(void (*fn)(void *), void *data)
 	struct callback *cb;
 
 	if (b->cbidx == MC_BATCH) {
-		mc_stats_flush(FL_CALLBACKS);
+		trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
 		xen_mc_flush();
 	}
 
+	trace_xen_mc_callback(fn, data);
+
 	cb = &b->callbacks[b->cbidx++];
 	cb->fn = fn;
 	cb->data = data;
 }
-
-#ifdef CONFIG_XEN_DEBUG_FS
-
-static struct dentry *d_mc_debug;
-
-static int __init xen_mc_debugfs(void)
-{
-	struct dentry *d_xen = xen_init_debugfs();
-
-	if (d_xen == NULL)
-		return -ENOMEM;
-
-	d_mc_debug = debugfs_create_dir("multicalls", d_xen);
-
-	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);
-
-	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
-	debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
-	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);
-
-	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
-				     mc_stats.histo, MC_BATCH);
-	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
-				     mc_stats.histo_hypercalls, NHYPERCALLS);
-	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
-				     mc_stats.flush, FL_N_REASONS);
-
-	return 0;
-}
-fs_initcall(xen_mc_debugfs);
-
-#endif  /* CONFIG_XEN_DEBUG_FS */
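For context, a minimal sketch of how callers are expected to drive this per-cpu batching code through the xen_mc_entry()/xen_mc_issue() helpers; the wrapper function name is hypothetical and the MMU-update payload is only an example, not part of this patch:

/* Illustrative caller (hypothetical wrapper name), loosely modelled on the
 * Xen mmu code: queue one MMU-update request into the per-cpu multicall
 * buffer and let xen_mc_issue() decide whether to flush immediately or
 * defer while in lazy MMU mode.  Assumes the helpers declared in
 * arch/x86/xen/multicalls.h and asm/xen/hypercall.h. */
static void example_queue_mmu_update(const struct mmu_update *req)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));		/* disables irqs, reserves arg space */
	u = mcs.args;
	*u = *req;

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* flushes unless batching lazily */
}

After this change, flushing a buffer that holds a single queued entry bypasses HYPERVISOR_multicall entirely and issues the hypercall directly via privcmd_call(), as the case 1 branch in xen_mc_flush() above shows.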