author    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2010-12-16 20:02:35 -0500
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2011-07-18 18:43:27 -0400
commit    847088079162a5cf8ab0d1ad1ecf7fa60c057246 (patch)
tree      d4e3a7a2901bcdc67033b6912694e6f1c2f65f6e
parent    c796f213a6934712ede728d9b53ef0e5066db23a (diff)
xen/trace: add mmu tracepoints
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
---
 arch/x86/xen/mmu.c         |  26 ++-
 include/trace/events/xen.h | 223 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 247 insertions(+), 2 deletions(-)
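
For readers unfamiliar with the TRACE_EVENT() machinery this patch relies on, the sketch below shows the general pattern in isolation: an event is declared once in a trace header, its bodies are instantiated by defining CREATE_TRACE_POINTS before including that header in exactly one .c file, and it is emitted with a trace_<event>() call that is designed to be very cheap when the event is disabled. The my_driver names and paths are hypothetical and not part of this patch; the real definitions added here live in include/trace/events/xen.h and are called from arch/x86/xen/mmu.c.

    /*
     * Hypothetical example, not part of this patch:
     * include/trace/events/my_driver.h
     */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM my_driver

    #if !defined(_TRACE_MY_DRIVER_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_MY_DRIVER_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(my_driver_update,
                TP_PROTO(void *ptr, unsigned long val),  /* call-site prototype */
                TP_ARGS(ptr, val),
                TP_STRUCT__entry(                        /* what gets recorded */
                        __field(void *, ptr)
                        __field(unsigned long, val)
                        ),
                TP_fast_assign(__entry->ptr = ptr;       /* copy args into the ring buffer */
                               __entry->val = val),
                TP_printk("ptr %p val %lx", __entry->ptr, __entry->val)
            );

    #endif /* _TRACE_MY_DRIVER_H */

    /* This part must be outside protection */
    #include <trace/define_trace.h>

    /* In exactly one .c file of the subsystem: instantiate the event bodies. */
    #define CREATE_TRACE_POINTS
    #include <trace/events/my_driver.h>

    static void my_driver_do_update(void *ptr, unsigned long val)
    {
            trace_my_driver_update(ptr, val);  /* near no-op unless the event is enabled */
            /* ... actual work ... */
    }

Once built in, such events show up under the tracefs events directory per TRACE_SYSTEM; the xen_mmu_* events added below belong to the existing xen system alongside the multicall events already defined in that header.
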
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0ccccb67a993..43fa7771ccb9 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
 
+#include <trace/events/xen.h>
+
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 	struct multicall_space mcs;
 	struct mmu_update *u;
 
+	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
 	mcs = xen_mc_entry(sizeof(*u));
 	u = mcs.args;
 
@@ -245,6 +249,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
+	trace_xen_mmu_set_pmd(ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -282,22 +288,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	return true;
 }
 
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
 {
 	if (!xen_batched_set_pte(ptep, pteval))
 		native_set_pte(ptep, pteval);
 }
 
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+	trace_xen_mmu_set_pte(ptep, pteval);
+	__xen_set_pte(ptep, pteval);
+}
+
 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
-	xen_set_pte(ptep, pteval);
+	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+	__xen_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
 {
 	/* Just return the pte as-is.  We preserve the bits on commit */
+	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
 	return *ptep;
 }
 
@@ -306,6 +320,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 {
 	struct mmu_update u;
 
+	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
 	xen_mc_batch();
 
 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -530,6 +545,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 static void xen_set_pud(pud_t *ptr, pud_t val)
 {
+	trace_xen_mmu_set_pud(ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -543,17 +560,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 #ifdef CONFIG_X86_PAE
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+	trace_xen_mmu_set_pte_atomic(ptep, pte);
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	trace_xen_mmu_pte_clear(mm, addr, ptep);
 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
 		native_pte_clear(mm, addr, ptep);
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
 {
+	trace_xen_mmu_pmd_clear(pmdp);
 	set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
@@ -629,6 +649,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+	trace_xen_mmu_set_pgd(ptr, user_ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 330848269bc1..08089e82f77e 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -125,6 +125,229 @@ TRACE_EVENT(xen_mc_extend_args,
 		  __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
 		  __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
 	);
+
+/* mmu */
+TRACE_EVENT(xen_mmu_set_pte,
+	    TP_PROTO(pte_t *ptep, pte_t pteval),
+	    TP_ARGS(ptep, pteval),
+	    TP_STRUCT__entry(
+		    __field(pte_t *, ptep)
+		    __field(pteval_t, pteval)
+		    ),
+	    TP_fast_assign(__entry->ptep = ptep;
+			   __entry->pteval = pteval.pte),
+	    TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
+		      __entry->ptep,
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+	);
+
+TRACE_EVENT(xen_mmu_set_pte_atomic,
+	    TP_PROTO(pte_t *ptep, pte_t pteval),
+	    TP_ARGS(ptep, pteval),
+	    TP_STRUCT__entry(
+		    __field(pte_t *, ptep)
+		    __field(pteval_t, pteval)
+		    ),
+	    TP_fast_assign(__entry->ptep = ptep;
+			   __entry->pteval = pteval.pte),
+	    TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
+		      __entry->ptep,
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+	);
+
+TRACE_EVENT(xen_mmu_set_domain_pte,
+	    TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
+	    TP_ARGS(ptep, pteval, domid),
+	    TP_STRUCT__entry(
+		    __field(pte_t *, ptep)
+		    __field(pteval_t, pteval)
+		    __field(unsigned, domid)
+		    ),
+	    TP_fast_assign(__entry->ptep = ptep;
+			   __entry->pteval = pteval.pte;
+			   __entry->domid = domid),
+	    TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
+		      __entry->ptep,
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
+		      __entry->domid)
+	);
+
+TRACE_EVENT(xen_mmu_set_pte_at,
+	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pteval),
+	    TP_ARGS(mm, addr, ptep, pteval),
+	    TP_STRUCT__entry(
+		    __field(struct mm_struct *, mm)
+		    __field(unsigned long, addr)
+		    __field(pte_t *, ptep)
+		    __field(pteval_t, pteval)
+		    ),
+	    TP_fast_assign(__entry->mm = mm;
+			   __entry->addr = addr;
+			   __entry->ptep = ptep;
+			   __entry->pteval = pteval.pte),
+	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+		      __entry->mm, __entry->addr, __entry->ptep,
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+	);
+
+TRACE_EVENT(xen_mmu_pte_clear,
+	    TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
+	    TP_ARGS(mm, addr, ptep),
+	    TP_STRUCT__entry(
+		    __field(struct mm_struct *, mm)
+		    __field(unsigned long, addr)
+		    __field(pte_t *, ptep)
+		    ),
+	    TP_fast_assign(__entry->mm = mm;
+			   __entry->addr = addr;
+			   __entry->ptep = ptep),
+	    TP_printk("mm %p addr %lx ptep %p",
+		      __entry->mm, __entry->addr, __entry->ptep)
+	);
+
+TRACE_EVENT(xen_mmu_set_pmd,
+	    TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
+	    TP_ARGS(pmdp, pmdval),
+	    TP_STRUCT__entry(
+		    __field(pmd_t *, pmdp)
+		    __field(pmdval_t, pmdval)
+		    ),
+	    TP_fast_assign(__entry->pmdp = pmdp;
+			   __entry->pmdval = pmdval.pmd),
+	    TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
+		      __entry->pmdp,
+		      (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
+		      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
+	);
+
+TRACE_EVENT(xen_mmu_pmd_clear,
+	    TP_PROTO(pmd_t *pmdp),
+	    TP_ARGS(pmdp),
+	    TP_STRUCT__entry(
+		    __field(pmd_t *, pmdp)
+		    ),
+	    TP_fast_assign(__entry->pmdp = pmdp),
+	    TP_printk("pmdp %p", __entry->pmdp)
+	);
+
+#if PAGETABLE_LEVELS >= 4
+
+TRACE_EVENT(xen_mmu_set_pud,
+	    TP_PROTO(pud_t *pudp, pud_t pudval),
+	    TP_ARGS(pudp, pudval),
+	    TP_STRUCT__entry(
+		    __field(pud_t *, pudp)
+		    __field(pudval_t, pudval)
+		    ),
+	    TP_fast_assign(__entry->pudp = pudp;
+			   __entry->pudval = native_pud_val(pudval)),
+	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
+		      __entry->pudp,
+		      (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
+		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
+	);
+
+TRACE_EVENT(xen_mmu_set_pgd,
+	    TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
+	    TP_ARGS(pgdp, user_pgdp, pgdval),
+	    TP_STRUCT__entry(
+		    __field(pgd_t *, pgdp)
+		    __field(pgd_t *, user_pgdp)
+		    __field(pgdval_t, pgdval)
+		    ),
+	    TP_fast_assign(__entry->pgdp = pgdp;
+			   __entry->user_pgdp = user_pgdp;
+			   __entry->pgdval = pgdval.pgd),
+	    TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
+		      __entry->pgdp, __entry->user_pgdp,
+		      (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
+		      (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
+	);
+
+TRACE_EVENT(xen_mmu_pud_clear,
+	    TP_PROTO(pud_t *pudp),
+	    TP_ARGS(pudp),
+	    TP_STRUCT__entry(
+		    __field(pud_t *, pudp)
+		    ),
+	    TP_fast_assign(__entry->pudp = pudp),
+	    TP_printk("pudp %p", __entry->pudp)
+	);
+#else
+
+TRACE_EVENT(xen_mmu_set_pud,
+	    TP_PROTO(pud_t *pudp, pud_t pudval),
+	    TP_ARGS(pudp, pudval),
+	    TP_STRUCT__entry(
+		    __field(pud_t *, pudp)
+		    __field(pudval_t, pudval)
+		    ),
+	    TP_fast_assign(__entry->pudp = pudp;
+			   __entry->pudval = native_pud_val(pudval)),
+	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
+		      __entry->pudp,
+		      (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
+		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
+	);
+
+#endif
+
+TRACE_EVENT(xen_mmu_pgd_clear,
+	    TP_PROTO(pgd_t *pgdp),
+	    TP_ARGS(pgdp),
+	    TP_STRUCT__entry(
+		    __field(pgd_t *, pgdp)
+		    ),
+	    TP_fast_assign(__entry->pgdp = pgdp),
+	    TP_printk("pgdp %p", __entry->pgdp)
+	);
+
+TRACE_EVENT(xen_mmu_ptep_modify_prot_start,
+	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pteval),
+	    TP_ARGS(mm, addr, ptep, pteval),
+	    TP_STRUCT__entry(
+		    __field(struct mm_struct *, mm)
+		    __field(unsigned long, addr)
+		    __field(pte_t *, ptep)
+		    __field(pteval_t, pteval)
+		    ),
+	    TP_fast_assign(__entry->mm = mm;
+			   __entry->addr = addr;
+			   __entry->ptep = ptep;
+			   __entry->pteval = pteval.pte),
+	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+		      __entry->mm, __entry->addr, __entry->ptep,
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+	);
+
+TRACE_EVENT(xen_mmu_ptep_modify_prot_commit,
+	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pteval),
+	    TP_ARGS(mm, addr, ptep, pteval),
+	    TP_STRUCT__entry(
+		    __field(struct mm_struct *, mm)
+		    __field(unsigned long, addr)
+		    __field(pte_t *, ptep)
+		    __field(pteval_t, pteval)
+		    ),
+	    TP_fast_assign(__entry->mm = mm;
+			   __entry->addr = addr;
+			   __entry->ptep = ptep;
+			   __entry->pteval = pteval.pte),
+	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+		      __entry->mm, __entry->addr, __entry->ptep,
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+	);
+
+
 #endif /* _TRACE_XEN_H */
 
 /* This part must be outside protection */
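
As a rough illustration of how the new events could be exercised at runtime (not part of the patch): each event defined above appears under the xen system in the ftrace events directory once the kernel is built with these tracepoints. The sketch below assumes debugfs is mounted at /sys/kernel/debug; the exact mount point and available events depend on the kernel configuration.

    /* Userspace sketch: enable xen_mmu_set_pte and stream formatted trace output. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define TRACE_DIR "/sys/kernel/debug/tracing"

    static void write_file(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);
                    exit(1);
            }
            if (write(fd, val, strlen(val)) < 0)
                    perror("write");
            close(fd);
    }

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd;

            /* turn on just this one event */
            write_file(TRACE_DIR "/events/xen/xen_mmu_set_pte/enable", "1");

            /* read events as they are emitted, formatted by TP_printk() */
            fd = open(TRACE_DIR "/trace_pipe", O_RDONLY);
            if (fd < 0) {
                    perror("trace_pipe");
                    return 1;
            }
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);
            close(fd);
            return 0;
    }
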