author     Alexander Potapenko <glider@google.com>         2016-07-28 18:49:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-28 19:07:41 -0400
commit     c146a2b98eb5898eb0fab15a332257a4102ecae9 (patch)
tree       87d665fec1265305deee3c9ab1028e0d9c58b364
parent     734537c9cb725fc8005ee7a25c48f1ad10fce5df (diff)
mm, kasan: account for object redzone in SLUB's nearest_obj()
When looking up the nearest SLUB object for a given address, correctly
calculate its offset if SLAB_RED_ZONE is enabled for that cache.
Previously, when KASAN detected an error on an object from a cache
with SLAB_RED_ZONE set, the actual start address of the object was
miscalculated, which led to random stack traces being reported.
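To see the miscalculation concretely, here is a minimal user-space model
of the lookup (the sizes are made-up illustration values, not actual
kernel ones): with SLAB_RED_ZONE, each cache->size slot begins with
s->red_left_pad bytes of left redzone, so rounding an address down to a
slot boundary yields the redzone start, not the object start.

	#include <stdio.h>
	#include <stdint.h>

	#define SIZE         128  /* cache->size: left redzone + payload + metadata */
	#define RED_LEFT_PAD  16  /* s->red_left_pad: left redzone at the slot start */

	int main(void)
	{
		uintptr_t page = 0x1000;                          /* page_address(page) */
		uintptr_t addr = page + SIZE + RED_LEFT_PAD + 4;  /* 4 bytes into object #1 */

		/* Old lookup: rounds down to the slot boundary, i.e. the start
		 * of the left redzone rather than of the object itself. */
		uintptr_t slot = addr - (addr - page) % SIZE;

		/* Fixed lookup: additionally skip the left redzone, as
		 * fixup_red_left() does when SLAB_RED_ZONE is enabled. */
		uintptr_t object = slot + RED_LEFT_PAD;

		printf("slot start (old result): %#lx\n", (unsigned long)slot);
		printf("object start (fixed):    %#lx\n", (unsigned long)object);
		return 0;
	}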
Fixes: 7ed2f9e663854db ("mm, kasan: SLAB support")
Link: http://lkml.kernel.org/r/1468347165-41906-2-git-send-email-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Steven Rostedt (Red Hat) <rostedt@goodmis.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Kuthonuzo Luruo <kuthonuzo.luruo@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/slub_def.h | 10
-rw-r--r--  mm/slub.c                |  2
2 files changed, 7 insertions, 5 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5624c1f3eb0a..cf501cf8e6db 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -119,15 +119,17 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 void object_err(struct kmem_cache *s, struct page *page,
 		u8 *object, char *reason);
 
+void *fixup_red_left(struct kmem_cache *s, void *p);
+
 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
 				void *x) {
 	void *object = x - (x - page_address(page)) % cache->size;
 	void *last_object = page_address(page) +
 		(page->objects - 1) * cache->size;
-	if (unlikely(object > last_object))
-		return last_object;
-	else
-		return object;
+	void *result = (unlikely(object > last_object)) ? last_object : object;
+
+	result = fixup_red_left(cache, result);
+	return result;
 }
 
 #endif /* _LINUX_SLUB_DEF_H */
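For reference, a user-space sketch of the rewritten helper, with
illustrative constants standing in for the real cache fields; it shows
that the clamp to last_object and the redzone fixup now compose, so even
an address past the last slot resolves to the last object's payload.

	#include <stdio.h>
	#include <stdint.h>

	#define SIZE         128  /* cache->size */
	#define OBJECTS        4  /* page->objects */
	#define RED_LEFT_PAD  16  /* applied by fixup_red_left() under SLAB_RED_ZONE */

	static uintptr_t model_nearest_obj(uintptr_t page, uintptr_t x)
	{
		uintptr_t object = x - (x - page) % SIZE;
		uintptr_t last_object = page + (OBJECTS - 1) * SIZE;
		uintptr_t result = object > last_object ? last_object : object;

		return result + RED_LEFT_PAD;  /* models fixup_red_left() */
	}

	int main(void)
	{
		uintptr_t page = 0x1000;

		/* In-range address: resolves to object #2's payload start. */
		printf("%#lx\n", (unsigned long)model_nearest_obj(page, page + 2 * SIZE + 50));

		/* Address past the last slot: clamped to object #3, then fixed up. */
		printf("%#lx\n", (unsigned long)model_nearest_obj(page, page + OBJECTS * SIZE + 8));
		return 0;
	}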
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,7 +124,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
-static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+inline void *fixup_red_left(struct kmem_cache *s, void *p)
 {
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 		p += s->red_left_pad;