Diffstat (limited to 'mm/slub.c')

 -rw-r--r--  mm/slub.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 20 deletions(-)

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
                                  unsigned long ptr_addr)
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
-        return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+        /*
+         * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+         * Normally, this doesn't cause any issues, as both set_freepointer()
+         * and get_freepointer() are called with a pointer with the same tag.
+         * However, there are some issues with CONFIG_SLUB_DEBUG code. For
+         * example, when __free_slab() iterates over objects in a cache, it
+         * passes untagged pointers to check_object(). check_object() in turn
+         * calls get_freepointer() with an untagged pointer, which causes the
+         * freepointer to be restored incorrectly.
+         */
+        return (void *)((unsigned long)ptr ^ s->random ^
+                        (unsigned long)kasan_reset_tag((void *)ptr_addr));
 #else
         return ptr;
 #endif
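
To see why resetting the tag on ptr_addr keeps the XOR scheme symmetric for tagged and untagged callers, here is a minimal stand-alone sketch. It is a user-space model, not the kernel code: the tag reset is approximated by clearing the top byte, and a fixed constant stands in for s->random.

```c
/* User-space model of the hardened freelist pointer; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT	56				/* model: tag lives in the top byte */
#define RANDOM_COOKIE	0x5a5a5a5a5a5a5a5aULL		/* stands in for s->random */

static uintptr_t reset_tag(uintptr_t addr)
{
	/* Drop the software tag stored in the top byte of the pointer. */
	return addr & ((1ULL << TAG_SHIFT) - 1);
}

static uintptr_t obfuscate(uintptr_t fp, uintptr_t slot_addr)
{
	/* Same XOR scheme as freelist_ptr(), with the slot address untagged. */
	return fp ^ RANDOM_COOKIE ^ reset_tag(slot_addr);
}

int main(void)
{
	uintptr_t fp = 0xffff000012345678ULL;		/* next free object */
	uintptr_t tagged_slot = 0xab0000c0ffee0000ULL;	/* tag 0xab in the top byte */
	uintptr_t untagged_slot = reset_tag(tagged_slot);

	uintptr_t stored = obfuscate(fp, tagged_slot);

	/* Decoding with either form of the slot address now gives fp back. */
	printf("tagged   decode: %#lx\n", (unsigned long)obfuscate(stored, tagged_slot));
	printf("untagged decode: %#lx\n", (unsigned long)obfuscate(stored, untagged_slot));
	return 0;
}
```

Both decodes print the same original pointer; without the reset, the untagged decode would come back with the tag byte XORed into the result, which is exactly the corruption the CONFIG_SLUB_DEBUG path hits.
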
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
                         __p < (__addr) + (__objects) * (__s)->size; \
                         __p += (__s)->size)
 
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-        for (__p = fixup_red_left(__s, __addr), __idx = 1; \
-                __idx <= __objects; \
-                __p += (__s)->size, __idx++)
-
 /* Determine object index from a given position */
 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
-        return (p - addr) / s->size;
+        return (kasan_reset_tag(p) - addr) / s->size;
 }
 
 static inline unsigned int order_objects(unsigned int order, unsigned int size)
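
A related but distinct problem is fixed in slab_index(): p may carry a tag in its top byte while addr comes from page_address() and does not, so the raw subtraction produces a nonsense index. A tiny user-space illustration, under the assumptions of 64-bit pointers, a tag in the top byte, and 0xff as the top byte of untagged kernel pointers:

```c
/* Illustration only; not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint64_t reset_tag(uint64_t addr)
{
	return addr | 0xffULL << 56;	/* assumed untagged-pointer tag value */
}

int main(void)
{
	uint64_t base = 0xffff000010000000ULL;	/* slab base, untagged */
	uint64_t obj  = 0x2aff000010000300ULL;	/* object at index 3, tag 0x2a */
	uint64_t size = 0x100;			/* object size */

	printf("raw index:      %llu\n", (unsigned long long)((obj - base) / size));
	printf("untagged index: %llu\n",
	       (unsigned long long)((reset_tag(obj) - base) / size));
	return 0;
}
```

The first line prints a huge bogus value; only after the tag is stripped does the arithmetic recover index 3.
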
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
                 return 1;
 
         base = page_address(page);
+        object = kasan_reset_tag(object);
         object = restore_red_left(s, object);
         if (object < base || object >= base + page->objects * s->size ||
                 (object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
         init_tracking(s, object);
 }
 
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+        if (!(s->flags & SLAB_POISON))
+                return;
+
+        metadata_access_enable();
+        memset(addr, POISON_INUSE, PAGE_SIZE << order);
+        metadata_access_disable();
+}
+
 static inline int alloc_consistency_checks(struct kmem_cache *s,
                                         struct page *page,
                                         void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                         struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+                        void *addr, int order) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
         struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
  */
 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
+        ptr = kasan_kmalloc_large(ptr, size, flags);
+        /* As ptr might get tagged, call kmemleak hook after KASAN. */
         kmemleak_alloc(ptr, size, 1, flags);
-        return kasan_kmalloc_large(ptr, size, flags);
+        return ptr;
 }
 
 static __always_inline void kfree_hook(void *x)
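
The reordering in kmalloc_large_node_hook() follows from the comment in the hunk: once KASAN may return a tagged pointer, kmemleak has to be handed that same tagged pointer, because the free path later passes kmemleak the pointer the caller actually used. A toy tracker (an illustration, not kmemleak's real data structure) shows the mismatch the old ordering would produce:

```c
/* Toy pointer tracker keyed by exact pointer value; not kmemleak itself. */
#include <stdint.h>
#include <stdio.h>

#define MAX_OBJS 16

static uintptr_t tracked[MAX_OBJS];

static void track_alloc(void *ptr)
{
	for (int i = 0; i < MAX_OBJS; i++)
		if (!tracked[i]) {
			tracked[i] = (uintptr_t)ptr;
			return;
		}
}

static int track_free(void *ptr)
{
	for (int i = 0; i < MAX_OBJS; i++)
		if (tracked[i] == (uintptr_t)ptr) {
			tracked[i] = 0;
			return 1;	/* record found */
		}
	return 0;			/* looks like freeing an unknown object */
}

int main(void)
{
	uintptr_t untagged = 0xffff000012345000ULL;	/* before tag assignment */
	uintptr_t tagged   = 0x3bff000012345000ULL;	/* what the caller actually gets */

	track_alloc((void *)untagged);		/* old order: tracker before KASAN */
	printf("free of tagged ptr found: %d\n", track_free((void *)tagged));

	track_alloc((void *)tagged);		/* new order: tracker after KASAN */
	printf("free of tagged ptr found: %d\n", track_free((void *)tagged));
	return 0;
}
```

The first free misses because the record was registered under the untagged value; registering the tagged pointer, as the new ordering does, makes the lookup succeed.
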
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         if (page_is_pfmemalloc(page))
                 SetPageSlabPfmemalloc(page);
 
-        start = page_address(page);
+        kasan_poison_slab(page);
 
-        if (unlikely(s->flags & SLAB_POISON))
-                memset(start, POISON_INUSE, PAGE_SIZE << order);
+        start = page_address(page);
 
-        kasan_poison_slab(page);
+        setup_page_debug(s, start, order);
 
         shuffle = shuffle_freelist(s, page);
 
         if (!shuffle) {
-                for_each_object_idx(p, idx, s, start, page->objects) {
-                        if (likely(idx < page->objects)) {
-                                next = p + s->size;
-                                next = setup_object(s, page, next);
-                                set_freepointer(s, p, next);
-                        } else
-                                set_freepointer(s, p, NULL);
-                }
                 start = fixup_red_left(s, start);
                 start = setup_object(s, page, start);
                 page->freelist = start;
+                for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+                        next = p + s->size;
+                        next = setup_object(s, page, next);
+                        set_freepointer(s, p, next);
+                        p = next;
+                }
+                set_freepointer(s, p, NULL);
         }
 
         page->inuse = page->objects;
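
The rewritten freelist construction replaces the for_each_object_idx() walk with an explicit loop that always advances through the pointer returned by setup_object(), so the pointers that may be retagged there are the ones used both as link targets and as the addresses the links are stored at. A compact user-space model of just that loop, with setup_object() and set_freepointer() reduced to trivial stand-ins:

```c
/* User-space model of the new freelist setup loop; not kernel code. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define NR_OBJECTS	4
#define OBJ_SIZE	32

static char slab[NR_OBJECTS * OBJ_SIZE];

/* Stand-in for setup_object(): the kernel version may return a retagged pointer. */
static void *setup_object(void *obj)
{
	return obj;
}

/* Stand-in for set_freepointer(): store the link at the start of the object. */
static void set_freepointer(void *obj, void *fp)
{
	memcpy(obj, &fp, sizeof(fp));
}

int main(void)
{
	void *start, *p, *next, *freelist;
	int idx;

	/* Mirror of the new loop: link objects 0..n-2, terminate the last one. */
	start = setup_object(slab);
	freelist = start;
	for (idx = 0, p = start; idx < NR_OBJECTS - 1; idx++) {
		next = (char *)p + OBJ_SIZE;
		next = setup_object(next);
		set_freepointer(p, next);
		p = next;
	}
	set_freepointer(p, NULL);

	/* Walk the list: each step loads the next pointer stored in the object. */
	for (p = freelist; p; memcpy(&p, p, sizeof(p)))
		printf("free object at offset %td\n", (char *)p - slab);
	return 0;
}
```

Walking the result visits the objects in address order and ends on the NULL terminator written after the loop, mirroring how page->freelist is set up in the hunk above.
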
@@ -3846,6 +3865,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
         unsigned int offset;
         size_t object_size;
 
+        ptr = kasan_reset_tag(ptr);
+
         /* Find object and usable object size. */
         s = page->slab_cache;
 
