author      Alexander Potapenko <glider@google.com>          2016-03-25 17:22:02 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2016-03-25 19:37:42 -0400
commit      505f5dcb1c419e55a9621a01f83eb5745d8d7398 (patch)
tree        4d608fdad5254972f8bba02f437060764e30bc6f
parent      7ed2f9e663854db313f177a511145630e398b402 (diff)
mm, kasan: add GFP flags to KASAN API
Add GFP flags to KASAN hooks for future patches to use.

This patch is based on the "mm: kasan: unified support for SLUB and
SLAB allocators" patch originally prepared by Dmitry Chernenkov.

Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   include/linux/kasan.h   19
-rw-r--r--   include/linux/slab.h     4
-rw-r--r--   mm/kasan/kasan.c        15
-rw-r--r--   mm/mempool.c            16
-rw-r--r--   mm/slab.c               15
-rw-r--r--   mm/slab.h                2
-rw-r--r--   mm/slab_common.c         4
-rw-r--r--   mm/slub.c               15
8 files changed, 48 insertions, 42 deletions
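For orientation only (not part of the patch): a minimal sketch of the calling
pattern this change moves to -- every allocation path forwards its own GFP mask
to the KASAN hook alongside the object, instead of calling the hook flag-less.
The wrapper name below is hypothetical; the hook signature matches the one
introduced in include/linux/kasan.h and mirrors kmem_cache_alloc_trace() in
include/linux/slab.h.

/* Illustrative sketch; example_kmalloc_trace() is a hypothetical wrapper. */
static __always_inline void *example_kmalloc_trace(struct kmem_cache *s,
                                                   gfp_t flags, size_t size)
{
        void *ret = kmem_cache_alloc(s, flags);

        /* Before this patch the call was kasan_kmalloc(s, ret, size); */
        kasan_kmalloc(s, ret, size, flags);
        return ret;
}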
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 839f2007a0f9..737371b56044 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -55,13 +55,14 @@ void kasan_poison_slab(struct page *page);
 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
 void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
-void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
 void kasan_kfree(void *ptr);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
-void kasan_krealloc(const void *object, size_t new_size);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
+                   gfp_t flags);
+void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
-void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
@@ -94,14 +95,16 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
                                              void *object) {}
 
-static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
 static inline void kasan_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
-                                 size_t size) {}
-static inline void kasan_krealloc(const void *object, size_t new_size) {}
+                                 size_t size, gfp_t flags) {}
+static inline void kasan_krealloc(const void *object, size_t new_size,
+                                  gfp_t flags) {}
 
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
+                                    gfp_t flags) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
diff --git a/include/linux/slab.h b/include/linux/slab.h
index aa61595a1482..508bd827e6dc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -376,7 +376,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 {
         void *ret = kmem_cache_alloc(s, flags);
 
-        kasan_kmalloc(s, ret, size);
+        kasan_kmalloc(s, ret, size, flags);
         return ret;
 }
 
@@ -387,7 +387,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
         void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 
-        kasan_kmalloc(s, ret, size);
+        kasan_kmalloc(s, ret, size, gfpflags);
         return ret;
 }
 #endif /* CONFIG_TRACING */
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 7c82509ef169..cb998e0ec9d3 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -434,9 +434,9 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 }
 #endif
 
-void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 {
-        kasan_kmalloc(cache, object, cache->object_size);
+        kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
 void kasan_slab_free(struct kmem_cache *cache, void *object)
@@ -462,7 +462,8 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
         kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+                   gfp_t flags)
 {
         unsigned long redzone_start;
         unsigned long redzone_end;
@@ -491,7 +492,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
-void kasan_kmalloc_large(const void *ptr, size_t size)
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 {
         struct page *page;
         unsigned long redzone_start;
@@ -510,7 +511,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size)
                             KASAN_PAGE_REDZONE);
 }
 
-void kasan_krealloc(const void *object, size_t size)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
         struct page *page;
 
@@ -520,9 +521,9 @@ void kasan_krealloc(const void *object, size_t size)
         page = virt_to_head_page(object);
 
         if (unlikely(!PageSlab(page)))
-                kasan_kmalloc_large(object, size);
+                kasan_kmalloc_large(object, size, flags);
         else
-                kasan_kmalloc(page->slab_cache, object, size);
+                kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
 void kasan_kfree(void *ptr)
diff --git a/mm/mempool.c b/mm/mempool.c
index 07c383ddbbab..9b7a14a791cc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -112,12 +112,12 @@ static void kasan_poison_element(mempool_t *pool, void *element)
                 kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
-static void kasan_unpoison_element(mempool_t *pool, void *element)
+static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
         if (pool->alloc == mempool_alloc_slab)
-                kasan_slab_alloc(pool->pool_data, element);
+                kasan_slab_alloc(pool->pool_data, element, flags);
         if (pool->alloc == mempool_kmalloc)
-                kasan_krealloc(element, (size_t)pool->pool_data);
+                kasan_krealloc(element, (size_t)pool->pool_data, flags);
         if (pool->alloc == mempool_alloc_pages)
                 kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
@@ -130,12 +130,12 @@ static void add_element(mempool_t *pool, void *element)
         pool->elements[pool->curr_nr++] = element;
 }
 
-static void *remove_element(mempool_t *pool)
+static void *remove_element(mempool_t *pool, gfp_t flags)
 {
         void *element = pool->elements[--pool->curr_nr];
 
         BUG_ON(pool->curr_nr < 0);
-        kasan_unpoison_element(pool, element);
+        kasan_unpoison_element(pool, element, flags);
         check_element(pool, element);
         return element;
 }
@@ -154,7 +154,7 @@ void mempool_destroy(mempool_t *pool)
                 return;
 
         while (pool->curr_nr) {
-                void *element = remove_element(pool);
+                void *element = remove_element(pool, GFP_KERNEL);
                 pool->free(element, pool->pool_data);
         }
         kfree(pool->elements);
@@ -250,7 +250,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr)
         spin_lock_irqsave(&pool->lock, flags);
         if (new_min_nr <= pool->min_nr) {
                 while (new_min_nr < pool->curr_nr) {
-                        element = remove_element(pool);
+                        element = remove_element(pool, GFP_KERNEL);
                         spin_unlock_irqrestore(&pool->lock, flags);
                         pool->free(element, pool->pool_data);
                         spin_lock_irqsave(&pool->lock, flags);
@@ -347,7 +347,7 @@ repeat_alloc:
 
         spin_lock_irqsave(&pool->lock, flags);
         if (likely(pool->curr_nr)) {
-                element = remove_element(pool);
+                element = remove_element(pool, gfp_temp);
                 spin_unlock_irqrestore(&pool->lock, flags);
                 /* paired with rmb in mempool_free(), read comment there */
                 smp_wmb();
diff --git a/mm/slab.c b/mm/slab.c
index 7515578471d8..17e2848979c5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-        kasan_slab_alloc(cachep, ret);
+        kasan_slab_alloc(cachep, ret, flags);
         trace_kmem_cache_alloc(_RET_IP_, ret,
                                cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
         ret = slab_alloc(cachep, flags, _RET_IP_);
 
-        kasan_kmalloc(cachep, ret, size);
+        kasan_kmalloc(cachep, ret, size, flags);
         trace_kmalloc(_RET_IP_, ret,
                       size, cachep->size, flags);
         return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
         void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-        kasan_slab_alloc(cachep, ret);
+        kasan_slab_alloc(cachep, ret, flags);
         trace_kmem_cache_alloc_node(_RET_IP_, ret,
                                     cachep->object_size, cachep->size,
                                     flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
         void *ret;
 
         ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-        kasan_kmalloc(cachep, ret, size);
+
+        kasan_kmalloc(cachep, ret, size, flags);
         trace_kmalloc_node(_RET_IP_, ret,
                            size, cachep->size,
                            flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
         if (unlikely(ZERO_OR_NULL_PTR(cachep)))
                 return cachep;
         ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-        kasan_kmalloc(cachep, ret, size);
+        kasan_kmalloc(cachep, ret, size, flags);
 
         return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                 return cachep;
         ret = slab_alloc(cachep, flags, caller);
 
-        kasan_kmalloc(cachep, ret, size);
+        kasan_kmalloc(cachep, ret, size, flags);
         trace_kmalloc(caller, ret,
                       size, cachep->size, flags);
 
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
         /* We assume that ksize callers could use the whole allocated area,
          * so we need to unpoison this area.
          */
-        kasan_krealloc(objp, size);
+        kasan_krealloc(objp, size, GFP_NOWAIT);
 
         return size;
 }
diff --git a/mm/slab.h b/mm/slab.h
index ff39a8fc3b3f..5969769fbee6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -405,7 +405,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
                 kmemleak_alloc_recursive(object, s->object_size, 1,
                                          s->flags, flags);
-                kasan_slab_alloc(s, object);
+                kasan_slab_alloc(s, object, flags);
         }
         memcg_kmem_put_cache(s);
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 4de72e220c82..3239bfd758e6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1013,7 +1013,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
         page = alloc_kmem_pages(flags, order);
         ret = page ? page_address(page) : NULL;
         kmemleak_alloc(ret, size, 1, flags);
-        kasan_kmalloc_large(ret, size);
+        kasan_kmalloc_large(ret, size, flags);
         return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1192,7 +1192,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
         ks = ksize(p);
 
         if (ks >= new_size) {
-                kasan_krealloc((void *)p, new_size);
+                kasan_krealloc((void *)p, new_size, flags);
                 return (void *)p;
         }
 
diff --git a/mm/slub.c b/mm/slub.c
index 7277413ebc8b..4dbb109eb8cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1313,7 +1313,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
         kmemleak_alloc(ptr, size, 1, flags);
-        kasan_kmalloc_large(ptr, size);
+        kasan_kmalloc_large(ptr, size, flags);
 }
 
 static inline void kfree_hook(const void *x)
@@ -2596,7 +2596,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
         void *ret = slab_alloc(s, gfpflags, _RET_IP_);
         trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
-        kasan_kmalloc(s, ret, size);
+        kasan_kmalloc(s, ret, size, gfpflags);
         return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2624,7 +2624,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
         trace_kmalloc_node(_RET_IP_, ret,
                            size, s->size, gfpflags, node);
 
-        kasan_kmalloc(s, ret, size);
+        kasan_kmalloc(s, ret, size, gfpflags);
         return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -3182,7 +3182,8 @@ static void early_kmem_cache_node_alloc(int node)
         init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
         init_tracking(kmem_cache_node, n);
 #endif
-        kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
+        kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
+                      GFP_KERNEL);
         init_kmem_cache_node(n);
         inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3561,7 +3562,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
         trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
-        kasan_kmalloc(s, ret, size);
+        kasan_kmalloc(s, ret, size, flags);
 
         return ret;
 }
@@ -3606,7 +3607,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
         trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
-        kasan_kmalloc(s, ret, size);
+        kasan_kmalloc(s, ret, size, flags);
 
         return ret;
 }
@@ -3635,7 +3636,7 @@ size_t ksize(const void *object)
         size_t size = __ksize(object);
         /* We assume that ksize callers could use whole allocated area,
            so we need unpoison this area. */
-        kasan_krealloc(object, size);
+        kasan_krealloc(object, size, GFP_NOWAIT);
         return size;
 }
 EXPORT_SYMBOL(ksize);