author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2013-10-23 21:07:45 -0400
committer	Pekka Enberg <penberg@iki.fi>	2013-10-24 13:17:33 -0400
commit	b1cb0982bdd6f57fed690f796659733350bb2cae
tree	2f460da47a949d3736ec8a734983a91b34ab166f	/mm/slab.c
parent	a57a49887eb3398d31bfaa8009531f7121d6537d
slab: change the management method of free objects of the slab
The slab's current method of managing free objects is odd: to get a free object, we touch a random position in the kmem_bufctl_t array. See the following example.

struct slab's free = 6
kmem_bufctl_t array: 1 END 5 7 0 4 3 2

To get free objects, we access this array in the following pattern:

6 -> 3 -> 7 -> 2 -> 5 -> 4 -> 0 -> 1 -> END

If we have many objects, this array grows large and no longer stays within a single cache line, which is bad for performance.

We can do the same thing in a simpler way, treating the array as a stack. All we have to do is maintain a stack top that points to the next free object; the free field of struct slab serves this purpose. When we need an object, we take the one at the stack top and adjust the top pointer. That's all. This method is already used in array_cache management. The following is the access pattern when we use this method.

struct slab's free = 0
kmem_bufctl_t array: 6 3 7 2 5 4 0 1

To get free objects, we access this array in the following pattern:

0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7

This may improve the cache-line footprint when a slab has many objects, and, in addition, it makes the code much simpler.

Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@iki.fi>
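To make the new scheme concrete, here is a minimal, standalone C sketch of the stack-style freelist described above. Every name in it (toy_slab, toy_init, toy_get_obj, toy_put_obj, NUM_OBJS) is a hypothetical stand-in that only mirrors the slabp->free / slab_bufctl() logic this patch introduces; it is an illustration, not the kernel code.

#include <assert.h>
#include <stdio.h>

#define NUM_OBJS 8

struct toy_slab {
	unsigned int free;		/* stack top: next free entry */
	unsigned int inuse;		/* number of allocated objects */
	unsigned int bufctl[NUM_OBJS];	/* stack of free object indices */
};

static void toy_init(struct toy_slab *s)
{
	unsigned int i;

	/* Mirrors the new cache_init_objs(): bufctl[i] = i, top at 0. */
	for (i = 0; i < NUM_OBJS; i++)
		s->bufctl[i] = i;
	s->free = 0;
	s->inuse = 0;
}

/* Pop the index at the stack top (cf. the new slab_get_obj()). */
static unsigned int toy_get_obj(struct toy_slab *s)
{
	assert(s->free < NUM_OBJS);	/* slab must not be full */
	s->inuse++;
	return s->bufctl[s->free++];
}

/* Push a freed index back onto the stack (cf. the new slab_put_obj()). */
static void toy_put_obj(struct toy_slab *s, unsigned int objnr)
{
	assert(s->free > 0);		/* slab must not be empty */
	s->bufctl[--s->free] = objnr;
	s->inuse--;
}

int main(void)
{
	struct toy_slab s;
	unsigned int a, b;

	toy_init(&s);

	/* Allocations walk the array sequentially: 0, 1, 2, ... */
	a = toy_get_obj(&s);
	b = toy_get_obj(&s);
	printf("got %u then %u, inuse=%u\n", a, b, s.inuse);

	/* Freeing pushes the index back; the next alloc reuses it. */
	toy_put_obj(&s, a);
	printf("next alloc gets %u\n", toy_get_obj(&s));
	return 0;
}

Because allocation is now a sequential walk upward from free, successive allocations touch adjacent array slots, which is exactly the cache-line benefit the changelog claims.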
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	94
1 file changed, 30 insertions(+), 64 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 75c60821e382..05fe37eb4a57 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -183,9 +183,6 @@ static bool pfmemalloc_active __read_mostly;
  */
 
 typedef unsigned int kmem_bufctl_t;
-#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
-#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
-#define BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
 #define SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
 
 /*
@@ -2653,9 +2650,8 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		if (cachep->ctor)
 			cachep->ctor(objp);
 #endif
-		slab_bufctl(slabp)[i] = i + 1;
+		slab_bufctl(slabp)[i] = i;
 	}
-	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
 }
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2671,16 +2667,14 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
 				int nodeid)
 {
-	void *objp = index_to_obj(cachep, slabp, slabp->free);
-	kmem_bufctl_t next;
+	void *objp;
 
 	slabp->inuse++;
-	next = slab_bufctl(slabp)[slabp->free];
+	objp = index_to_obj(cachep, slabp, slab_bufctl(slabp)[slabp->free]);
 #if DEBUG
-	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
 #endif
-	slabp->free = next;
+	slabp->free++;
 
 	return objp;
 }
@@ -2689,19 +2683,23 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
 				void *objp, int nodeid)
 {
 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
-
 #if DEBUG
+	kmem_bufctl_t i;
+
 	/* Verify that the slab belongs to the intended node */
 	WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
 
-	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
-		printk(KERN_ERR "slab: double free detected in cache "
-				"'%s', objp %p\n", cachep->name, objp);
-		BUG();
+	/* Verify double free bug */
+	for (i = slabp->free; i < cachep->num; i++) {
+		if (slab_bufctl(slabp)[i] == objnr) {
+			printk(KERN_ERR "slab: double free detected in cache "
+					"'%s', objp %p\n", cachep->name, objp);
+			BUG();
+		}
 	}
 #endif
-	slab_bufctl(slabp)[objnr] = slabp->free;
-	slabp->free = objnr;
+	slabp->free--;
+	slab_bufctl(slabp)[slabp->free] = objnr;
 	slabp->inuse--;
 }
 
@@ -2862,9 +2860,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	BUG_ON(objnr >= cachep->num);
 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
-#endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2881,33 +2876,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	return objp;
 }
 
-static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
-{
-	kmem_bufctl_t i;
-	int entries = 0;
-
-	/* Check slab's freelist to see if this obj is there. */
-	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
-		entries++;
-		if (entries > cachep->num || i >= cachep->num)
-			goto bad;
-	}
-	if (entries != cachep->num - slabp->inuse) {
-bad:
-		printk(KERN_ERR "slab: Internal list corruption detected in "
-				"cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
-			cachep->name, cachep->num, slabp, slabp->inuse,
-			print_tainted());
-		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
-			sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
-			1);
-		BUG();
-	}
-}
 #else
 #define kfree_debugcheck(x) do { } while(0)
 #define cache_free_debugcheck(x,objp,z) (objp)
-#define check_slabp(x,y) do { } while(0)
 #endif
 
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
@@ -2957,7 +2928,6 @@ retry:
 	}
 
 	slabp = list_entry(entry, struct slab, list);
-	check_slabp(cachep, slabp);
 	check_spinlock_acquired(cachep);
 
 	/*
@@ -2975,11 +2945,10 @@ retry:
 			ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
 									node));
 		}
-		check_slabp(cachep, slabp);
 
 		/* move slabp to correct slabp list: */
 		list_del(&slabp->list);
-		if (slabp->free == BUFCTL_END)
+		if (slabp->free == cachep->num)
 			list_add(&slabp->list, &n->slabs_full);
 		else
 			list_add(&slabp->list, &n->slabs_partial);
@@ -3054,16 +3023,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-	{
-		struct slab *slabp;
-		unsigned objnr;
-
-		slabp = virt_to_slab(objp);
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
-		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
-	}
-#endif
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
 		cachep->ctor(objp);
@@ -3269,7 +3228,6 @@ retry:
 
 	slabp = list_entry(entry, struct slab, list);
 	check_spinlock_acquired_node(cachep, nodeid);
-	check_slabp(cachep, slabp);
 
 	STATS_INC_NODEALLOCS(cachep);
 	STATS_INC_ACTIVE(cachep);
@@ -3278,12 +3236,11 @@ retry:
 	BUG_ON(slabp->inuse == cachep->num);
 
 	obj = slab_get_obj(cachep, slabp, nodeid);
-	check_slabp(cachep, slabp);
 	n->free_objects--;
 	/* move slabp to correct slabp list: */
 	list_del(&slabp->list);
 
-	if (slabp->free == BUFCTL_END)
+	if (slabp->free == cachep->num)
 		list_add(&slabp->list, &n->slabs_full);
 	else
 		list_add(&slabp->list, &n->slabs_partial);
@@ -3445,11 +3402,9 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		n = cachep->node[node];
 		list_del(&slabp->list);
 		check_spinlock_acquired_node(cachep, node);
-		check_slabp(cachep, slabp);
 		slab_put_obj(cachep, slabp, objp, node);
 		STATS_DEC_ACTIVE(cachep);
 		n->free_objects++;
-		check_slabp(cachep, slabp);
 
 		/* fixup slab chains */
 		if (slabp->inuse == 0) {
@@ -4308,12 +4263,23 @@ static inline int add_caller(unsigned long *n, unsigned long v)
 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
 {
 	void *p;
-	int i;
+	int i, j;
+
 	if (n[0] == n[1])
 		return;
 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
-		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+		bool active = true;
+
+		for (j = s->free; j < c->num; j++) {
+			/* Skip freed item */
+			if (slab_bufctl(s)[j] == i) {
+				active = false;
+				break;
+			}
+		}
+		if (!active)
 			continue;
+
 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
 			return;
 	}
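One consequence of dropping the BUFCTL_ACTIVE/BUFCTL_FREE markers, visible in the slab_put_obj() and handle_slab() hunks above, is that "is object i active?" can no longer be read out of a single bufctl entry; the DEBUG paths instead scan the free region bufctl[free..num-1]. A hedged sketch of that check, reusing the hypothetical toy_slab from the earlier example:

/*
 * Returns nonzero if objnr is allocated, i.e. its index does not appear
 * in the free region of the stack. Mirrors the double-free loop in the
 * new slab_put_obj() and the "Skip freed item" loop in handle_slab().
 */
static int toy_obj_active(const struct toy_slab *s, unsigned int objnr)
{
	unsigned int j;

	for (j = s->free; j < NUM_OBJS; j++)
		if (s->bufctl[j] == objnr)
			return 0;	/* on the freelist: not active */
	return 1;
}

The trade-off is deliberate: the hot get/put paths become O(1) stack operations, while these linear scans are confined to DEBUG-only code.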