author     Matthew Dobson <colpatch@us.ibm.com>    2006-02-01 06:05:47 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-02-01 11:53:17 -0500
commit     78d382d77c84229d031431931bf6490d5da6ab86
tree       bb6b4fa29b05209ebe88beee496b7785644bb874 /mm/slab.c
parent     12dd36faec5d3bd96da84fa8f76efecc632930ab
[PATCH] slab: extract slab_{put|get}_obj
Create two helper functions, slab_get_obj() and slab_put_obj(), to replace
duplicated code in mm/slab.c.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Acked-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  77

1 file changed, 40 insertions(+), 37 deletions(-)
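For readers without the surrounding mm/slab.c context: each slab keeps its objects in s_mem, an inuse count, a free index, and a per-slab kmem_bufctl_t array (reached through slab_bufctl()) in which entry i stores the index of the next free object, so allocation pops the head of that embedded free list and freeing pushes the object back onto it. The userspace sketch below only illustrates that bookkeeping under the assumptions just stated; the toy_* names, the fixed object count, and the assert()-based checks are invented for the example and are not kernel code.

#include <assert.h>
#include <stdlib.h>

#define TOY_NR_OBJS     8                /* objects per toy slab (arbitrary) */
#define TOY_OBJ_SIZE    64               /* stand-in for cachep->buffer_size */
#define TOY_BUFCTL_FREE ((unsigned)-1)   /* mimics the BUFCTL_FREE debug marker */

struct toy_slab {
	char *s_mem;                     /* start of the object area */
	unsigned free;                   /* index of the first free object */
	unsigned inuse;                  /* objects currently allocated */
	unsigned bufctl[TOY_NR_OBJS];    /* bufctl[i] = index of next free object */
};

static void toy_slab_init(struct toy_slab *s)
{
	unsigned i;

	s->s_mem = malloc((size_t)TOY_NR_OBJS * TOY_OBJ_SIZE);
	assert(s->s_mem != NULL);
	s->free = 0;
	s->inuse = 0;
	for (i = 0; i < TOY_NR_OBJS; i++)
		s->bufctl[i] = i + 1;    /* last entry points past the end */
}

/* Mirrors the shape of slab_get_obj(): pop the head of the free list.
 * As in the kernel, the caller must not call this on a full slab
 * (cf. BUG_ON(slabp->inuse == cachep->num) at the call sites). */
static void *toy_get_obj(struct toy_slab *s)
{
	void *objp = s->s_mem + (size_t)s->free * TOY_OBJ_SIZE;
	unsigned next = s->bufctl[s->free];

	s->inuse++;
	s->bufctl[s->free] = TOY_BUFCTL_FREE;  /* the kernel does this only under #if DEBUG */
	s->free = next;
	return objp;
}

/* Mirrors the shape of slab_put_obj(): push the object back on the free list. */
static void toy_put_obj(struct toy_slab *s, void *objp)
{
	unsigned objnr = (unsigned)(((char *)objp - s->s_mem) / TOY_OBJ_SIZE);

	assert(s->bufctl[objnr] == TOY_BUFCTL_FREE);  /* double-free check */
	s->bufctl[objnr] = s->free;
	s->free = objnr;
	s->inuse--;
}

Centralizing the pop/push this way is also why the patch reads well: the DEBUG-only bookkeeping (BUFCTL_FREE marking, node checks, double-free detection) lives in one place instead of being repeated at every call site.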
diff --git a/mm/slab.c b/mm/slab.c
index 85adf0992011..594a9155c7d8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2226,6 +2226,42 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
 	}
 }
 
+static void *slab_get_obj(kmem_cache_t *cachep, struct slab *slabp, int nodeid)
+{
+	void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+	kmem_bufctl_t next;
+
+	slabp->inuse++;
+	next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+	WARN_ON(slabp->nodeid != nodeid);
+#endif
+	slabp->free = next;
+
+	return objp;
+}
+
+static void slab_put_obj(kmem_cache_t *cachep, struct slab *slabp, void *objp,
+			 int nodeid)
+{
+	unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
+
+#if DEBUG
+	/* Verify that the slab belongs to the intended node */
+	WARN_ON(slabp->nodeid != nodeid);
+
+	if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+		printk(KERN_ERR "slab: double free detected in cache "
+		       "'%s', objp %p\n", cachep->name, objp);
+		BUG();
+	}
+#endif
+	slab_bufctl(slabp)[objnr] = slabp->free;
+	slabp->free = objnr;
+	slabp->inuse--;
+}
+
 static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
 {
 	int i;
@@ -2515,22 +2551,12 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 		check_slabp(cachep, slabp);
 		check_spinlock_acquired(cachep);
 		while (slabp->inuse < cachep->num && batchcount--) {
-			kmem_bufctl_t next;
 			STATS_INC_ALLOCED(cachep);
 			STATS_INC_ACTIVE(cachep);
 			STATS_SET_HIGH(cachep);
 
-			/* get obj pointer */
-			ac->entry[ac->avail++] = slabp->s_mem +
-				slabp->free * cachep->buffer_size;
-
-			slabp->inuse++;
-			next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-			slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-			WARN_ON(numa_node_id() != slabp->nodeid);
-#endif
-			slabp->free = next;
+			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+							      numa_node_id());
 		}
 		check_slabp(cachep, slabp);
 
@@ -2675,7 +2701,6 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 	struct slab *slabp;
 	struct kmem_list3 *l3;
 	void *obj;
-	kmem_bufctl_t next;
 	int x;
 
 	l3 = cachep->nodelists[nodeid];
@@ -2701,14 +2726,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 
 	BUG_ON(slabp->inuse == cachep->num);
 
-	/* get obj pointer */
-	obj = slabp->s_mem + slabp->free * cachep->buffer_size;
-	slabp->inuse++;
-	next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
-	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
-	slabp->free = next;
+	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
 	l3->free_objects--;
 	/* move slabp to correct slabp list: */
@@ -2748,29 +2766,14 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
 	for (i = 0; i < nr_objects; i++) {
 		void *objp = objpp[i];
 		struct slab *slabp;
-		unsigned int objnr;
 
 		slabp = page_get_slab(virt_to_page(objp));
 		l3 = cachep->nodelists[node];
 		list_del(&slabp->list);
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
 		check_spinlock_acquired_node(cachep, node);
 		check_slabp(cachep, slabp);
-
-#if DEBUG
-		/* Verify that the slab belongs to the intended node */
-		WARN_ON(slabp->nodeid != node);
-
-		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
-			printk(KERN_ERR "slab: double free detected in cache "
-			       "'%s', objp %p\n", cachep->name, objp);
-			BUG();
-		}
-#endif
-		slab_bufctl(slabp)[objnr] = slabp->free;
-		slabp->free = objnr;
+		slab_put_obj(cachep, slabp, objp, node);
 		STATS_DEC_ACTIVE(cachep);
-		slabp->inuse--;
 		l3->free_objects++;
 		check_slabp(cachep, slabp);
 
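As a quick sanity check of the illustration above (again hypothetical, reusing the toy_slab helpers from the sketch after the diffstat rather than anything in mm/slab.c): allocating two objects and freeing the first leaves object 0 at the head of the free list, and a repeated free of the same pointer would trip the same kind of double-free check that slab_put_obj() performs under #if DEBUG.

/* Builds on the toy_slab sketch above (hypothetical helper names). */
int main(void)
{
	struct toy_slab s;
	void *a, *b;

	toy_slab_init(&s);

	a = toy_get_obj(&s);     /* takes object 0, free list now starts at 1 */
	b = toy_get_obj(&s);     /* takes object 1, free list now starts at 2 */
	assert(s.inuse == 2 && s.free == 2);

	toy_put_obj(&s, a);      /* object 0 becomes the new free-list head */
	assert(s.free == 0 && s.inuse == 1);

	/* A second toy_put_obj(&s, a) here would fail the double-free assert,
	 * analogous to the BUG() in slab_put_obj()'s DEBUG path. */

	toy_put_obj(&s, b);
	free(s.s_mem);
	return 0;
}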