author	Dave Hansen <dave.hansen@linux.intel.com>	2015-06-07 14:37:03 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-06-09 06:24:32 -0400
commit	2a1dcb1f796ad37028df37d96fc7c5b6b1705a43
tree	ef1afe258c5ccd08d723cabf7e78dbb50b48d7cc
parent	97efebf1bc30a80122af3295ebdb726dbc040ca6
x86/mpx: Trace the attempts to find bounds tables
There are two different events being traced here. They are doing similar things, so they share a trace EVENT_CLASS and are presented together.

1. Trace when MPX is zapping pages, "mpx_unmap_zap":

   When MPX can not free an entire bounds table, it will instead try to
   zap unused parts of a bounds table to free the backing memory. This
   decreases RSS (resident set size) without decreasing the virtual
   space allocated for bounds tables.

2. Trace attempts to find bounds tables, "mpx_unmap_search":

   This event traces any time we go looking to unmap a bounds table for
   a given virtual address range. This is useful to ensure that the
   kernel actually "tried" to free a bounds table, as opposed to only
   recording the times it succeeded in finding one. It might try and
   fail if it realizes that a table is shared with an adjacent VMA which
   is not being unmapped.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20150607183703.B9D2468B@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
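For reference, once the patch is applied the two new events can be consumed through ftrace like any other trace event. The snippet below is a minimal userspace sketch, not part of the patch: it assumes tracefs is mounted at /sys/kernel/debug/tracing and that the events appear under an "mpx" event group (i.e. that the header's TRACE_SYSTEM is "mpx"); only the event names and the printed "[0xstart:0xend]" range are taken from the DEFINE_EVENT() and TP_printk() lines in the diff. It normally needs root privileges.

/*
 * Minimal sketch (not from the patch): enable the two new MPX events via
 * tracefs and stream the resulting trace lines.  The tracefs mount point
 * and the "mpx" event group name are assumptions, not taken from the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACEFS "/sys/kernel/debug/tracing"	/* assumed mount point */

/* Write "1" to an event's enable file; returns 0 on success. */
static int enable_event(const char *event)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), TRACEFS "/events/mpx/%s/enable", event);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *pipe;

	/* Event names come from the DEFINE_EVENT() lines in the patch. */
	if (enable_event("mpx_unmap_zap") || enable_event("mpx_unmap_search"))
		return EXIT_FAILURE;

	/*
	 * trace_pipe blocks until new events arrive; each event line ends
	 * with the "[0xstart:0xend]" range from the shared TP_printk().
	 */
	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	fclose(pipe);
	return EXIT_SUCCESS;
}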
-rw-r--r--	arch/x86/include/asm/trace/mpx.h	32
-rw-r--r--	arch/x86/mm/mpx.c	2
2 files changed, 34 insertions, 0 deletions
diff --git a/arch/x86/include/asm/trace/mpx.h b/arch/x86/include/asm/trace/mpx.h
index 5c3af06a2ae1..c13c6fa3fa92 100644
--- a/arch/x86/include/asm/trace/mpx.h
+++ b/arch/x86/include/asm/trace/mpx.h
@@ -63,6 +63,38 @@ TRACE_EVENT(bounds_exception_mpx,
 		__entry->bndstatus)
 );
 
+DECLARE_EVENT_CLASS(mpx_range_trace,
+
+	TP_PROTO(unsigned long start,
+		 unsigned long end),
+	TP_ARGS(start, end),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, start)
+		__field(unsigned long, end)
+	),
+
+	TP_fast_assign(
+		__entry->start = start;
+		__entry->end = end;
+	),
+
+	TP_printk("[0x%p:0x%p]",
+		(void *)__entry->start,
+		(void *)__entry->end
+	)
+);
+
+DEFINE_EVENT(mpx_range_trace, mpx_unmap_zap,
+	TP_PROTO(unsigned long start, unsigned long end),
+	TP_ARGS(start, end)
+);
+
+DEFINE_EVENT(mpx_range_trace, mpx_unmap_search,
+	TP_PROTO(unsigned long start, unsigned long end),
+	TP_ARGS(start, end)
+);
+
 #else
 
 /*
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 75e5d7043f65..55729ee9263e 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -668,6 +668,7 @@ static int zap_bt_entries(struct mm_struct *mm,
 
 		len = min(vma->vm_end, end) - addr;
 		zap_page_range(vma, addr, len, NULL);
+		trace_mpx_unmap_zap(addr, addr+len);
 
 		vma = vma->vm_next;
 		addr = vma->vm_start;
@@ -840,6 +841,7 @@ static int mpx_unmap_tables(struct mm_struct *mm,
 	long __user *bd_entry, *bde_start, *bde_end;
 	unsigned long bt_addr;
 
+	trace_mpx_unmap_search(start, end);
 	/*
 	 * "Edge" bounds tables are those which are being used by the region
 	 * (start -> end), but that may be shared with adjacent areas. If they