aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrey Konovalov <andreyknvl@google.com>2018-04-10 19:30:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-04-11 13:28:32 -0400
commitc3895391df385c6628638f014c87e16f5e2efd45 (patch)
tree891a1eafbfb599e7712ac70388c8d4b787048477
parentb7d349c741293b694c552593dbd7d38ea7eb7143 (diff)
kasan, slub: fix handling of kasan_slab_free hook
The kasan_slab_free hook's return value denotes whether the reuse of a slab object must be delayed (e.g. when the object is put into memory quarantine). The current way SLUB handles this hook is by ignoring its return value and hardcoding checks similar (but not exactly the same) to the ones performed in kasan_slab_free, which is prone to making mistakes. The main difference between the hardcoded checks and the ones in kasan_slab_free is whether we want to perform a free in case when an invalid-free or a double-free was detected (we don't). This patch changes the way SLUB handles this by: 1. taking into account the return value of kasan_slab_free for each of the objects that are being freed; 2. reconstructing the freelist of objects to exclude the ones, whose reuse must be delayed. [andreyknvl@google.com: eliminate unnecessary branch in slab_free] Link: http://lkml.kernel.org/r/a62759a2545fddf69b0c034547212ca1eb1b3ce2.1520359686.git.andreyknvl@google.com Link: http://lkml.kernel.org/r/083f58501e54731203801d899632d76175868e97.1519400992.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov <andreyknvl@google.com> Acked-by: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Alexander Potapenko <glider@google.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Kostya Serebryany <kcc@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/slub.c57
1 files changed, 34 insertions, 23 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 4fb037c98782..44aa7847324a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1363,10 +1363,8 @@ static __always_inline void kfree_hook(void *x)
1363 kasan_kfree_large(x, _RET_IP_); 1363 kasan_kfree_large(x, _RET_IP_);
1364} 1364}
1365 1365
1366static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x) 1366static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
1367{ 1367{
1368 void *freeptr;
1369
1370 kmemleak_free_recursive(x, s->flags); 1368 kmemleak_free_recursive(x, s->flags);
1371 1369
1372 /* 1370 /*
@@ -1386,17 +1384,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
1386 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 1384 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1387 debug_check_no_obj_freed(x, s->object_size); 1385 debug_check_no_obj_freed(x, s->object_size);
1388 1386
1389 freeptr = get_freepointer(s, x); 1387 /* KASAN might put x into memory quarantine, delaying its reuse */
1390 /* 1388 return kasan_slab_free(s, x, _RET_IP_);
1391 * kasan_slab_free() may put x into memory quarantine, delaying its
1392 * reuse. In this case the object's freelist pointer is changed.
1393 */
1394 kasan_slab_free(s, x, _RET_IP_);
1395 return freeptr;
1396} 1389}
1397 1390
1398static inline void slab_free_freelist_hook(struct kmem_cache *s, 1391static inline bool slab_free_freelist_hook(struct kmem_cache *s,
1399 void *head, void *tail) 1392 void **head, void **tail)
1400{ 1393{
1401/* 1394/*
1402 * Compiler cannot detect this function can be removed if slab_free_hook() 1395 * Compiler cannot detect this function can be removed if slab_free_hook()
@@ -1407,13 +1400,33 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
1407 defined(CONFIG_DEBUG_OBJECTS_FREE) || \ 1400 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1408 defined(CONFIG_KASAN) 1401 defined(CONFIG_KASAN)
1409 1402
1410 void *object = head; 1403 void *object;
1411 void *tail_obj = tail ? : head; 1404 void *next = *head;
1412 void *freeptr; 1405 void *old_tail = *tail ? *tail : *head;
1406
1407 /* Head and tail of the reconstructed freelist */
1408 *head = NULL;
1409 *tail = NULL;
1413 1410
1414 do { 1411 do {
1415 freeptr = slab_free_hook(s, object); 1412 object = next;
1416 } while ((object != tail_obj) && (object = freeptr)); 1413 next = get_freepointer(s, object);
1414 /* If object's reuse doesn't have to be delayed */
1415 if (!slab_free_hook(s, object)) {
1416 /* Move object to the new freelist */
1417 set_freepointer(s, object, *head);
1418 *head = object;
1419 if (!*tail)
1420 *tail = object;
1421 }
1422 } while (object != old_tail);
1423
1424 if (*head == *tail)
1425 *tail = NULL;
1426
1427 return *head != NULL;
1428#else
1429 return true;
1417#endif 1430#endif
1418} 1431}
1419 1432
@@ -2968,14 +2981,12 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2968 void *head, void *tail, int cnt, 2981 void *head, void *tail, int cnt,
2969 unsigned long addr) 2982 unsigned long addr)
2970{ 2983{
2971 slab_free_freelist_hook(s, head, tail);
2972 /* 2984 /*
2973 * slab_free_freelist_hook() could have put the items into quarantine. 2985 * With KASAN enabled slab_free_freelist_hook modifies the freelist
2974 * If so, no need to free them. 2986 * to remove objects, whose reuse must be delayed.
2975 */ 2987 */
2976 if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU)) 2988 if (slab_free_freelist_hook(s, &head, &tail))
2977 return; 2989 do_slab_free(s, page, head, tail, cnt, addr);
2978 do_slab_free(s, page, head, tail, cnt, addr);
2979} 2990}
2980 2991
2981#ifdef CONFIG_KASAN 2992#ifdef CONFIG_KASAN