aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2016-03-15 17:54:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-15 19:55:16 -0400
commit249247b6f8ee362189a2f2bf598a14ff6c95fb4c (patch)
treebcf211d6446ff1cc56996423ba42a2b4734e7c86
parentd31676dfde257cb2b3e52d4e657d8ad2251e4d49 (diff)
mm/slab: remove object status buffer for DEBUG_SLAB_LEAK
Now, we don't use the object status buffer in any setup. Remove it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/slab.c34
1 file changed, 2 insertions, 32 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 907abe9964bf..02be9d9776ad 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -380,22 +380,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
380 380
381#endif 381#endif
382 382
383#define OBJECT_FREE (0)
384#define OBJECT_ACTIVE (1)
385
386#ifdef CONFIG_DEBUG_SLAB_LEAK 383#ifdef CONFIG_DEBUG_SLAB_LEAK
387 384
388static void set_obj_status(struct page *page, int idx, int val)
389{
390 int freelist_size;
391 char *status;
392 struct kmem_cache *cachep = page->slab_cache;
393
394 freelist_size = cachep->num * sizeof(freelist_idx_t);
395 status = (char *)page->freelist + freelist_size;
396 status[idx] = val;
397}
398
399static inline bool is_store_user_clean(struct kmem_cache *cachep) 385static inline bool is_store_user_clean(struct kmem_cache *cachep)
400{ 386{
401 return atomic_read(&cachep->store_user_clean) == 1; 387 return atomic_read(&cachep->store_user_clean) == 1;
@@ -413,7 +399,6 @@ static inline void set_store_user_dirty(struct kmem_cache *cachep)
413} 399}
414 400
415#else 401#else
416static inline void set_obj_status(struct page *page, int idx, int val) {}
417static inline void set_store_user_dirty(struct kmem_cache *cachep) {} 402static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
418 403
419#endif 404#endif
@@ -476,9 +461,6 @@ static size_t calculate_freelist_size(int nr_objs, size_t align)
476 size_t freelist_size; 461 size_t freelist_size;
477 462
478 freelist_size = nr_objs * sizeof(freelist_idx_t); 463 freelist_size = nr_objs * sizeof(freelist_idx_t);
479 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
480 freelist_size += nr_objs * sizeof(char);
481
482 if (align) 464 if (align)
483 freelist_size = ALIGN(freelist_size, align); 465 freelist_size = ALIGN(freelist_size, align);
484 466
@@ -491,10 +473,7 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
491 int nr_objs; 473 int nr_objs;
492 size_t remained_size; 474 size_t remained_size;
493 size_t freelist_size; 475 size_t freelist_size;
494 int extra_space = 0;
495 476
496 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
497 extra_space = sizeof(char);
498 /* 477 /*
499 * Ignore padding for the initial guess. The padding 478 * Ignore padding for the initial guess. The padding
500 * is at most @align-1 bytes, and @buffer_size is at 479 * is at most @align-1 bytes, and @buffer_size is at
@@ -503,7 +482,7 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
503 * into the memory allocation when taking the padding 482 * into the memory allocation when taking the padding
504 * into account. 483 * into account.
505 */ 484 */
506 nr_objs = slab_size / (buffer_size + idx_size + extra_space); 485 nr_objs = slab_size / (buffer_size + idx_size);
507 486
508 /* 487 /*
509 * This calculated number will be either the right 488 * This calculated number will be either the right
@@ -1961,16 +1940,13 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
1961 break; 1940 break;
1962 1941
1963 if (flags & CFLGS_OFF_SLAB) { 1942 if (flags & CFLGS_OFF_SLAB) {
1964 size_t freelist_size_per_obj = sizeof(freelist_idx_t);
1965 /* 1943 /*
1966 * Max number of objs-per-slab for caches which 1944 * Max number of objs-per-slab for caches which
1967 * use off-slab slabs. Needed to avoid a possible 1945 * use off-slab slabs. Needed to avoid a possible
1968 * looping condition in cache_grow(). 1946 * looping condition in cache_grow().
1969 */ 1947 */
1970 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1971 freelist_size_per_obj += sizeof(char);
1972 offslab_limit = size; 1948 offslab_limit = size;
1973 offslab_limit /= freelist_size_per_obj; 1949 offslab_limit /= sizeof(freelist_idx_t);
1974 1950
1975 if (num > offslab_limit) 1951 if (num > offslab_limit)
1976 break; 1952 break;
@@ -2533,7 +2509,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
2533 if (cachep->ctor) 2509 if (cachep->ctor)
2534 cachep->ctor(objp); 2510 cachep->ctor(objp);
2535#endif 2511#endif
2536 set_obj_status(page, i, OBJECT_FREE);
2537 set_free_obj(page, i, i); 2512 set_free_obj(page, i, i);
2538 } 2513 }
2539} 2514}
@@ -2745,7 +2720,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2745 BUG_ON(objnr >= cachep->num); 2720 BUG_ON(objnr >= cachep->num);
2746 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2721 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2747 2722
2748 set_obj_status(page, objnr, OBJECT_FREE);
2749 if (cachep->flags & SLAB_POISON) { 2723 if (cachep->flags & SLAB_POISON) {
2750 poison_obj(cachep, objp, POISON_FREE); 2724 poison_obj(cachep, objp, POISON_FREE);
2751 slab_kernel_map(cachep, objp, 0, caller); 2725 slab_kernel_map(cachep, objp, 0, caller);
@@ -2878,8 +2852,6 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2878static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 2852static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2879 gfp_t flags, void *objp, unsigned long caller) 2853 gfp_t flags, void *objp, unsigned long caller)
2880{ 2854{
2881 struct page *page;
2882
2883 if (!objp) 2855 if (!objp)
2884 return objp; 2856 return objp;
2885 if (cachep->flags & SLAB_POISON) { 2857 if (cachep->flags & SLAB_POISON) {
@@ -2904,8 +2876,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2904 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 2876 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2905 } 2877 }
2906 2878
2907 page = virt_to_head_page(objp);
2908 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2909 objp += obj_offset(cachep); 2879 objp += obj_offset(cachep);
2910 if (cachep->ctor && cachep->flags & SLAB_POISON) 2880 if (cachep->ctor && cachep->flags & SLAB_POISON)
2911 cachep->ctor(objp); 2881 cachep->ctor(objp);