about summary refs log tree commit diff stats
path: root/arch/sparc
diff options
context:
space:
mode:
authorDavid Ahern <david.ahern@oracle.com>2015-06-15 16:15:45 -0400
committerDavid S. Miller <davem@davemloft.net>2015-06-25 09:01:02 -0400
commitb69fb7699c92f85991672fc144b0adb7c717fbc8 (patch)
tree07291dc96b98d4037d8466e12d5dc3b9207f52c0 /arch/sparc
parent2bf7c3efc393937d1e5f92681501a914dbfbae07 (diff)
sparc64: perf: Add sanity checking on addresses in user stack
Processes are getting killed (sigbus or segv) while walking userspace callchains when using perf. In some instances I have seen ufp = 0x7ff which does not seem like a proper stack address. This patch adds a function to run validity checks against the address before attempting the copy_from_user. The checks are copied from the x86 version as a start point with the addition of a 4-byte alignment check. Signed-off-by: David Ahern <david.ahern@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')
-rw-r--r--arch/sparc/include/asm/uaccess_64.h22
-rw-r--r--arch/sparc/kernel/perf_event.c13
2 files changed, 35 insertions, 0 deletions
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index a35194b7dba0..ea6e9a20f3ff 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -49,6 +49,28 @@ do { \
49 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ 49 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
50} while(0) 50} while(0)
51 51
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	unsigned long end;

	/*
	 * When the size is a compile-time constant the subtraction folds
	 * away, so a single comparison suffices (size is assumed small
	 * relative to limit in that case).
	 */
	if (__builtin_constant_p(size))
		return addr > limit - size;

	/* Variable size: check for wrap-around of addr + size explicitly. */
	end = addr + size;
	if (end < size)
		return true;

	return end > limit;
}
67
/*
 * Range-check a user pointer without losing its address-space
 * annotation.  __chk_user_ptr() is presumably a static-analysis
 * (sparse) hook that verifies addr carries the __user qualifier --
 * TODO confirm against the kernel's compiler.h -- and the __force
 * cast then strips that qualifier so the arithmetic can be done on
 * a plain unsigned long.  Evaluates to nonzero when the (addr, size)
 * range is NOT a valid user range (see __chk_range_not_ok()).
 */
#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
73
52static inline int __access_ok(const void __user * addr, unsigned long size) 74static inline int __access_ok(const void __user * addr, unsigned long size)
53{ 75{
54 return 1; 76 return 1;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 48387be665e9..a665e3f8c6c6 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1741,6 +1741,16 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
1741 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1741 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1742} 1742}
1743 1743
1744static inline int
1745valid_user_frame(const void __user *fp, unsigned long size)
1746{
1747 /* addresses should be at least 4-byte aligned */
1748 if (((unsigned long) fp) & 3)
1749 return 0;
1750
1751 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1752}
1753
1744static void perf_callchain_user_64(struct perf_callchain_entry *entry, 1754static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1745 struct pt_regs *regs) 1755 struct pt_regs *regs)
1746{ 1756{
@@ -1753,6 +1763,9 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1753 unsigned long pc; 1763 unsigned long pc;
1754 1764
1755 usf = (struct sparc_stackf __user *)ufp; 1765 usf = (struct sparc_stackf __user *)ufp;
1766 if (!valid_user_frame(usf, sizeof(sf)))
1767 break;
1768
1756 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1769 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1757 break; 1770 break;
1758 1771