author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-10-23 21:07:46 -0400
committer	Pekka Enberg <penberg@iki.fi>	2013-10-24 13:17:34 -0400
commit		16025177e1e16529451108faed257db95c7c9d6a
tree		e819e0d36f55d876ec87aa33022a67e947259943	/mm/slab.c
parent		b1cb0982bdd6f57fed690f796659733350bb2cae

slab: remove kmem_bufctl_t
Now that the way free objects of a slab are managed has changed, the special
values BUFCTL_END, BUFCTL_FREE and BUFCTL_ACTIVE are no longer needed. So
remove them.

Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
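For context, a minimal userspace sketch (not kernel code; names and sizes are
made up) of the difference between the old bufctl chain, which needed sentinel
values such as BUFCTL_END, and the plain index-array scheme introduced by the
parent commit, which does not:

/*
 * Illustrative only.  Old scheme: each bufctl entry chains to the index of
 * the next free object, so a sentinel is needed to terminate the chain.
 * New scheme: the management area is just an array of free object indices
 * walked with a cursor, so ordinary unsigned ints are enough.
 */
#include <stdio.h>

#define NR_OBJS   4
#define LIST_END  ((unsigned int)(~0U))	/* plays the role of BUFCTL_END */

int main(void)
{
	/* Old scheme: bufctl[i] holds the index of the next free object. */
	unsigned int bufctl[NR_OBJS] = { 1, 2, 3, LIST_END };
	unsigned int head = 0;

	while (head != LIST_END) {
		printf("old scheme hands out object %u\n", head);
		head = bufctl[head];
	}

	/* New scheme: an array of free indices plus a cursor into it. */
	unsigned int freelist[NR_OBJS] = { 0, 1, 2, 3 };
	unsigned int cursor;

	for (cursor = 0; cursor < NR_OBJS; cursor++)
		printf("new scheme hands out object %u\n", freelist[cursor]);

	return 0;
}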
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	42
1 file changed, 11 insertions, 31 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 05fe37eb4a57..6ced1ccf8abb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -163,27 +163,7 @@
  */
 static bool pfmemalloc_active __read_mostly;
 
-/*
- * kmem_bufctl_t:
- *
- * Bufctl's are used for linking objs within a slab
- *   linked offsets.
- *
- * This implementation relies on "struct page" for locating the cache &
- *   slab an object belongs to.
- * This allows the bufctl structure to be small (one int), but limits
- *   the number of objects a slab (not a cache) can contain when off-slab
- *   bufctls are used. The limit is the size of the largest general cache
- *   that does not use off-slab slabs.
- * For 32bit archs with 4 kB pages, is this 56.
- * This is not serious, as it is only for large objects, when it is unwise
- * to have too many per slab.
- * Note: This limit can be raised by introducing a general cache whose size
- * is less than 512 (PAGE_SIZE<<3), but greater than 256.
- */
-
-typedef unsigned int kmem_bufctl_t;
-#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
+#define	SLAB_LIMIT	(((unsigned int)(~0U))-1)
 
 /*
  * struct slab
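The hunk above also moves SLAB_LIMIT from (~0U)-3 to (~0U)-1, presumably
because the top of the index range no longer has to be kept free for the
BUFCTL_* markers the commit message mentions. A throwaway check of the two
constants (illustrative only, compiled outside the kernel):

#include <stdio.h>

int main(void)
{
	unsigned int old_limit = ((unsigned int)(~0U)) - 3;	/* 0xfffffffc */
	unsigned int new_limit = ((unsigned int)(~0U)) - 1;	/* 0xfffffffe */

	printf("old SLAB_LIMIT = %u (0x%x)\n", old_limit, old_limit);
	printf("new SLAB_LIMIT = %u (0x%x)\n", new_limit, new_limit);
	return 0;
}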
@@ -197,7 +177,7 @@ struct slab {
 		struct list_head list;
 		void *s_mem;		/* including colour offset */
 		unsigned int inuse;	/* num of objs active in slab */
-		kmem_bufctl_t free;
+		unsigned int free;
 	};
 };
 
@@ -613,7 +593,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
-	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(unsigned int), align);
 }
 
 /*
@@ -633,7 +613,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
  * slab is used for:
  *
  * - The struct slab
- * - One kmem_bufctl_t for each object
+ * - One unsigned int for each object
  * - Padding to respect alignment of @align
  * - @buffer_size bytes for each object
  *
@@ -658,7 +638,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
 	 *   into account.
 	 */
 	nr_objs = (slab_size - sizeof(struct slab)) /
-		  (buffer_size + sizeof(kmem_bufctl_t));
+		  (buffer_size + sizeof(unsigned int));
 
 	/*
 	 * This calculated number will be either the right
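To make the estimate above concrete with hypothetical numbers: for a 4096-byte
slab, 128-byte objects and a 32-byte header standing in for struct slab, each
object costs its buffer_size plus one unsigned int freelist entry, giving
(4096 - 32) / (128 + 4) = 30 objects per slab. A standalone sketch of the same
arithmetic (not the kernel function):

#include <stdio.h>

struct slab_stub {		/* stand-in for struct slab; the real size differs */
	char pad[32];
};

int main(void)
{
	size_t slab_size   = 4096;	/* whole slab: management area + objects */
	size_t buffer_size = 128;	/* hypothetical object size */
	size_t nr_objs;

	/* Same shape as the kernel's on-slab estimate above. */
	nr_objs = (slab_size - sizeof(struct slab_stub)) /
		  (buffer_size + sizeof(unsigned int));

	printf("estimated objects per slab: %zu\n", nr_objs);	/* prints 30 */
	return 0;
}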
@@ -2068,7 +2048,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 			 * looping condition in cache_grow().
 			 */
 			offslab_limit = size - sizeof(struct slab);
-			offslab_limit /= sizeof(kmem_bufctl_t);
+			offslab_limit /= sizeof(unsigned int);
 
 			if (num > offslab_limit)
 				break;
@@ -2309,7 +2289,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (!cachep->num)
 		return -E2BIG;
 
-	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+	slab_size = ALIGN(cachep->num * sizeof(unsigned int)
 			  + sizeof(struct slab), cachep->align);
 
 	/*
@@ -2324,7 +2304,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
 		slab_size =
-		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+		    cachep->num * sizeof(unsigned int) + sizeof(struct slab);
 
 #ifdef CONFIG_PAGE_POISONING
 		/* If we're going to use the generic kernel_map_pages()
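Taken together, the two hunks above compute the same management size twice:
aligned when it lives on the slab, unaligned when it is kept off-slab. With
hypothetical numbers (30 objects, a 32-byte struct slab stand-in, 64-byte
alignment), a local sketch:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN(); renamed to make clear it is local. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long num      = 30;	/* objects per slab */
	unsigned long slab_hdr = 32;	/* stand-in for sizeof(struct slab) */
	unsigned long align    = 64;	/* e.g. a cacheline */

	unsigned long on_slab  = ALIGN_UP(num * sizeof(unsigned int) + slab_hdr, align);
	unsigned long off_slab = num * sizeof(unsigned int) + slab_hdr;

	printf("on-slab  management size: %lu\n", on_slab);	/* 192 */
	printf("off-slab management size: %lu\n", off_slab);	/* 152 */
	return 0;
}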
@@ -2603,9 +2583,9 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep,
 	return slabp;
 }
 
-static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
+static inline unsigned int *slab_bufctl(struct slab *slabp)
 {
-	return (kmem_bufctl_t *) (slabp + 1);
+	return (unsigned int *) (slabp + 1);
 }
 
 static void cache_init_objs(struct kmem_cache *cachep,
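The pointer arithmetic in slab_bufctl() above relies on the freelist index
array being placed immediately after the management struct in the same
allocation, so "(slabp + 1)" is its first element. A self-contained sketch of
that layout trick with stand-in types (not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct mgmt {				/* stand-in for struct slab */
	unsigned int inuse;
	unsigned int free;
};

static inline unsigned int *index_array(struct mgmt *m)
{
	return (unsigned int *)(m + 1);	/* array begins right past the header */
}

int main(void)
{
	unsigned int nr_objs = 4;
	/* One allocation: header followed by nr_objs freelist entries. */
	struct mgmt *m = malloc(sizeof(*m) + nr_objs * sizeof(unsigned int));
	unsigned int i;

	for (i = 0; i < nr_objs; i++)
		index_array(m)[i] = i;

	printf("entry 2 of the freelist = %u\n", index_array(m)[2]);	/* prints 2 */
	free(m);
	return 0;
}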
@@ -2684,7 +2664,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 {
 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
 #if DEBUG
-	kmem_bufctl_t i;
+	unsigned int i;
 
 	/* Verify that the slab belongs to the intended node */
 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);