aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include
diff options
context:
space:
mode:
authorAndrey Ryabinin <a.ryabinin@samsung.com>2015-02-13 17:39:56 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-14 00:21:41 -0500
commit393f203f5fd54421fddb1e2a263f64d3876eeadb (patch)
treee78e96b38ecd36eec62325cc2cc21e8d79397bc1 /arch/x86/include
parent3f15801cdc2379ca4bf507f48bffd788f9e508ae (diff)
x86_64: kasan: add interceptors for memset/memmove/memcpy functions
Recently, instrumentation of builtin function calls was removed from GCC 5.0. To check the memory accessed by such functions, userspace asan always uses interceptors for them. So now we should do this as well. This patch declares memset/memmove/memcpy as weak symbols. In mm/kasan/kasan.c we have our own implementation of those functions which checks memory before accessing it. The default memset/memmove/memcpy now always have aliases with a '__' prefix. For files that are built without kasan instrumentation (e.g. mm/slub.c), the original mem* functions are replaced (via #define) with the prefixed variants, because we don't want to check memory accesses there. Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Konstantin Serebryany <kcc@google.com> Cc: Dmitry Chernenkov <dmitryc@google.com> Signed-off-by: Andrey Konovalov <adech.fo@gmail.com> Cc: Yuri Gribov <tetra2005@gmail.com> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Sasha Levin <sasha.levin@oracle.com> Cc: Christoph Lameter <cl@linux.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/include')
-rw-r--r--arch/x86/include/asm/string_64.h18
1 file changed, 17 insertions, 1 deletion
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 19e2c468fc2c..e4661196994e 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,11 +27,12 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
27 function. */ 27 function. */
28 28
29#define __HAVE_ARCH_MEMCPY 1 29#define __HAVE_ARCH_MEMCPY 1
30extern void *__memcpy(void *to, const void *from, size_t len);
31
30#ifndef CONFIG_KMEMCHECK 32#ifndef CONFIG_KMEMCHECK
31#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 33#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
32extern void *memcpy(void *to, const void *from, size_t len); 34extern void *memcpy(void *to, const void *from, size_t len);
33#else 35#else
34extern void *__memcpy(void *to, const void *from, size_t len);
35#define memcpy(dst, src, len) \ 36#define memcpy(dst, src, len) \
36({ \ 37({ \
37 size_t __len = (len); \ 38 size_t __len = (len); \
@@ -53,9 +54,11 @@ extern void *__memcpy(void *to, const void *from, size_t len);
53 54
54#define __HAVE_ARCH_MEMSET 55#define __HAVE_ARCH_MEMSET
55void *memset(void *s, int c, size_t n); 56void *memset(void *s, int c, size_t n);
57void *__memset(void *s, int c, size_t n);
56 58
57#define __HAVE_ARCH_MEMMOVE 59#define __HAVE_ARCH_MEMMOVE
58void *memmove(void *dest, const void *src, size_t count); 60void *memmove(void *dest, const void *src, size_t count);
61void *__memmove(void *dest, const void *src, size_t count);
59 62
60int memcmp(const void *cs, const void *ct, size_t count); 63int memcmp(const void *cs, const void *ct, size_t count);
61size_t strlen(const char *s); 64size_t strlen(const char *s);
@@ -63,6 +66,19 @@ char *strcpy(char *dest, const char *src);
63char *strcat(char *dest, const char *src); 66char *strcat(char *dest, const char *src);
64int strcmp(const char *cs, const char *ct); 67int strcmp(const char *cs, const char *ct);
65 68
69#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
70
71/*
72 * For files that not instrumented (e.g. mm/slub.c) we
73 * should use not instrumented version of mem* functions.
74 */
75
76#undef memcpy
77#define memcpy(dst, src, len) __memcpy(dst, src, len)
78#define memmove(dst, src, len) __memmove(dst, src, len)
79#define memset(s, c, n) __memset(s, c, n)
80#endif
81
66#endif /* __KERNEL__ */ 82#endif /* __KERNEL__ */
67 83
68#endif /* _ASM_X86_STRING_64_H */ 84#endif /* _ASM_X86_STRING_64_H */