diff options
author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2013-10-23 21:07:48 -0400 |
---|---|---|
committer | Pekka Enberg <penberg@iki.fi> | 2013-10-24 13:17:34 -0400 |
commit | 106a74e13b329cf609c145dc198087c04f5f8ca5 (patch) | |
tree | 63909fa596e89915eb6058e9a09dfba3c102586e | |
parent | 45eed508de6cec3174040f800aaf90d60c7b5b5b (diff) |
slab: replace free and inuse in struct slab with newly introduced active
Now, `free` in struct slab has the same meaning as `inuse`.
So, remove both and replace them with a single field, `active`.
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
-rw-r--r-- | mm/slab.c | 54 |
1 file changed, 25 insertions(+), 29 deletions(-)
@@ -174,8 +174,7 @@ struct slab { | |||
174 | struct { | 174 | struct { |
175 | struct list_head list; | 175 | struct list_head list; |
176 | void *s_mem; /* including colour offset */ | 176 | void *s_mem; /* including colour offset */ |
177 | unsigned int inuse; /* num of objs active in slab */ | 177 | unsigned int active; /* num of objs active in slab */ |
178 | unsigned int free; | ||
179 | }; | 178 | }; |
180 | }; | 179 | }; |
181 | 180 | ||
@@ -1658,7 +1657,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) | |||
1658 | active_slabs++; | 1657 | active_slabs++; |
1659 | } | 1658 | } |
1660 | list_for_each_entry(slabp, &n->slabs_partial, list) { | 1659 | list_for_each_entry(slabp, &n->slabs_partial, list) { |
1661 | active_objs += slabp->inuse; | 1660 | active_objs += slabp->active; |
1662 | active_slabs++; | 1661 | active_slabs++; |
1663 | } | 1662 | } |
1664 | list_for_each_entry(slabp, &n->slabs_free, list) | 1663 | list_for_each_entry(slabp, &n->slabs_free, list) |
@@ -2451,7 +2450,7 @@ static int drain_freelist(struct kmem_cache *cache, | |||
2451 | 2450 | ||
2452 | slabp = list_entry(p, struct slab, list); | 2451 | slabp = list_entry(p, struct slab, list); |
2453 | #if DEBUG | 2452 | #if DEBUG |
2454 | BUG_ON(slabp->inuse); | 2453 | BUG_ON(slabp->active); |
2455 | #endif | 2454 | #endif |
2456 | list_del(&slabp->list); | 2455 | list_del(&slabp->list); |
2457 | /* | 2456 | /* |
@@ -2570,9 +2569,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, | |||
2570 | slabp = addr + colour_off; | 2569 | slabp = addr + colour_off; |
2571 | colour_off += cachep->slab_size; | 2570 | colour_off += cachep->slab_size; |
2572 | } | 2571 | } |
2573 | slabp->inuse = 0; | 2572 | slabp->active = 0; |
2574 | slabp->s_mem = addr + colour_off; | 2573 | slabp->s_mem = addr + colour_off; |
2575 | slabp->free = 0; | ||
2576 | return slabp; | 2574 | return slabp; |
2577 | } | 2575 | } |
2578 | 2576 | ||
@@ -2642,12 +2640,11 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, | |||
2642 | { | 2640 | { |
2643 | void *objp; | 2641 | void *objp; |
2644 | 2642 | ||
2645 | slabp->inuse++; | 2643 | objp = index_to_obj(cachep, slabp, slab_bufctl(slabp)[slabp->active]); |
2646 | objp = index_to_obj(cachep, slabp, slab_bufctl(slabp)[slabp->free]); | 2644 | slabp->active++; |
2647 | #if DEBUG | 2645 | #if DEBUG |
2648 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); | 2646 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); |
2649 | #endif | 2647 | #endif |
2650 | slabp->free++; | ||
2651 | 2648 | ||
2652 | return objp; | 2649 | return objp; |
2653 | } | 2650 | } |
@@ -2663,7 +2660,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, | |||
2663 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); | 2660 | WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); |
2664 | 2661 | ||
2665 | /* Verify double free bug */ | 2662 | /* Verify double free bug */ |
2666 | for (i = slabp->free; i < cachep->num; i++) { | 2663 | for (i = slabp->active; i < cachep->num; i++) { |
2667 | if (slab_bufctl(slabp)[i] == objnr) { | 2664 | if (slab_bufctl(slabp)[i] == objnr) { |
2668 | printk(KERN_ERR "slab: double free detected in cache " | 2665 | printk(KERN_ERR "slab: double free detected in cache " |
2669 | "'%s', objp %p\n", cachep->name, objp); | 2666 | "'%s', objp %p\n", cachep->name, objp); |
@@ -2671,9 +2668,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, | |||
2671 | } | 2668 | } |
2672 | } | 2669 | } |
2673 | #endif | 2670 | #endif |
2674 | slabp->free--; | 2671 | slabp->active--; |
2675 | slab_bufctl(slabp)[slabp->free] = objnr; | 2672 | slab_bufctl(slabp)[slabp->active] = objnr; |
2676 | slabp->inuse--; | ||
2677 | } | 2673 | } |
2678 | 2674 | ||
2679 | /* | 2675 | /* |
@@ -2908,9 +2904,9 @@ retry: | |||
2908 | * there must be at least one object available for | 2904 | * there must be at least one object available for |
2909 | * allocation. | 2905 | * allocation. |
2910 | */ | 2906 | */ |
2911 | BUG_ON(slabp->inuse >= cachep->num); | 2907 | BUG_ON(slabp->active >= cachep->num); |
2912 | 2908 | ||
2913 | while (slabp->inuse < cachep->num && batchcount--) { | 2909 | while (slabp->active < cachep->num && batchcount--) { |
2914 | STATS_INC_ALLOCED(cachep); | 2910 | STATS_INC_ALLOCED(cachep); |
2915 | STATS_INC_ACTIVE(cachep); | 2911 | STATS_INC_ACTIVE(cachep); |
2916 | STATS_SET_HIGH(cachep); | 2912 | STATS_SET_HIGH(cachep); |
@@ -2921,7 +2917,7 @@ retry: | |||
2921 | 2917 | ||
2922 | /* move slabp to correct slabp list: */ | 2918 | /* move slabp to correct slabp list: */ |
2923 | list_del(&slabp->list); | 2919 | list_del(&slabp->list); |
2924 | if (slabp->free == cachep->num) | 2920 | if (slabp->active == cachep->num) |
2925 | list_add(&slabp->list, &n->slabs_full); | 2921 | list_add(&slabp->list, &n->slabs_full); |
2926 | else | 2922 | else |
2927 | list_add(&slabp->list, &n->slabs_partial); | 2923 | list_add(&slabp->list, &n->slabs_partial); |
@@ -3206,14 +3202,14 @@ retry: | |||
3206 | STATS_INC_ACTIVE(cachep); | 3202 | STATS_INC_ACTIVE(cachep); |
3207 | STATS_SET_HIGH(cachep); | 3203 | STATS_SET_HIGH(cachep); |
3208 | 3204 | ||
3209 | BUG_ON(slabp->inuse == cachep->num); | 3205 | BUG_ON(slabp->active == cachep->num); |
3210 | 3206 | ||
3211 | obj = slab_get_obj(cachep, slabp, nodeid); | 3207 | obj = slab_get_obj(cachep, slabp, nodeid); |
3212 | n->free_objects--; | 3208 | n->free_objects--; |
3213 | /* move slabp to correct slabp list: */ | 3209 | /* move slabp to correct slabp list: */ |
3214 | list_del(&slabp->list); | 3210 | list_del(&slabp->list); |
3215 | 3211 | ||
3216 | if (slabp->free == cachep->num) | 3212 | if (slabp->active == cachep->num) |
3217 | list_add(&slabp->list, &n->slabs_full); | 3213 | list_add(&slabp->list, &n->slabs_full); |
3218 | else | 3214 | else |
3219 | list_add(&slabp->list, &n->slabs_partial); | 3215 | list_add(&slabp->list, &n->slabs_partial); |
@@ -3380,7 +3376,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, | |||
3380 | n->free_objects++; | 3376 | n->free_objects++; |
3381 | 3377 | ||
3382 | /* fixup slab chains */ | 3378 | /* fixup slab chains */ |
3383 | if (slabp->inuse == 0) { | 3379 | if (slabp->active == 0) { |
3384 | if (n->free_objects > n->free_limit) { | 3380 | if (n->free_objects > n->free_limit) { |
3385 | n->free_objects -= cachep->num; | 3381 | n->free_objects -= cachep->num; |
3386 | /* No need to drop any previously held | 3382 | /* No need to drop any previously held |
@@ -3441,7 +3437,7 @@ free_done: | |||
3441 | struct slab *slabp; | 3437 | struct slab *slabp; |
3442 | 3438 | ||
3443 | slabp = list_entry(p, struct slab, list); | 3439 | slabp = list_entry(p, struct slab, list); |
3444 | BUG_ON(slabp->inuse); | 3440 | BUG_ON(slabp->active); |
3445 | 3441 | ||
3446 | i++; | 3442 | i++; |
3447 | p = p->next; | 3443 | p = p->next; |
@@ -4066,22 +4062,22 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) | |||
4066 | spin_lock_irq(&n->list_lock); | 4062 | spin_lock_irq(&n->list_lock); |
4067 | 4063 | ||
4068 | list_for_each_entry(slabp, &n->slabs_full, list) { | 4064 | list_for_each_entry(slabp, &n->slabs_full, list) { |
4069 | if (slabp->inuse != cachep->num && !error) | 4065 | if (slabp->active != cachep->num && !error) |
4070 | error = "slabs_full accounting error"; | 4066 | error = "slabs_full accounting error"; |
4071 | active_objs += cachep->num; | 4067 | active_objs += cachep->num; |
4072 | active_slabs++; | 4068 | active_slabs++; |
4073 | } | 4069 | } |
4074 | list_for_each_entry(slabp, &n->slabs_partial, list) { | 4070 | list_for_each_entry(slabp, &n->slabs_partial, list) { |
4075 | if (slabp->inuse == cachep->num && !error) | 4071 | if (slabp->active == cachep->num && !error) |
4076 | error = "slabs_partial inuse accounting error"; | 4072 | error = "slabs_partial accounting error"; |
4077 | if (!slabp->inuse && !error) | 4073 | if (!slabp->active && !error) |
4078 | error = "slabs_partial/inuse accounting error"; | 4074 | error = "slabs_partial accounting error"; |
4079 | active_objs += slabp->inuse; | 4075 | active_objs += slabp->active; |
4080 | active_slabs++; | 4076 | active_slabs++; |
4081 | } | 4077 | } |
4082 | list_for_each_entry(slabp, &n->slabs_free, list) { | 4078 | list_for_each_entry(slabp, &n->slabs_free, list) { |
4083 | if (slabp->inuse && !error) | 4079 | if (slabp->active && !error) |
4084 | error = "slabs_free/inuse accounting error"; | 4080 | error = "slabs_free accounting error"; |
4085 | num_slabs++; | 4081 | num_slabs++; |
4086 | } | 4082 | } |
4087 | free_objects += n->free_objects; | 4083 | free_objects += n->free_objects; |
@@ -4243,7 +4239,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s) | |||
4243 | for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { | 4239 | for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) { |
4244 | bool active = true; | 4240 | bool active = true; |
4245 | 4241 | ||
4246 | for (j = s->free; j < c->num; j++) { | 4242 | for (j = s->active; j < c->num; j++) { |
4247 | /* Skip freed item */ | 4243 | /* Skip freed item */ |
4248 | if (slab_bufctl(s)[j] == i) { | 4244 | if (slab_bufctl(s)[j] == i) { |
4249 | active = false; | 4245 | active = false; |