aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-03-25 06:06:44 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-25 11:22:49 -0500
commit3ded175a4b7a4548f3358dcf5f3ad65f63cdb4ed (patch)
treec27a3da70d638bcf32f75c28d1da3f1eb18cb617
parentc5e3b83e97be4e09961c0af101644643e5d03d17 (diff)
[PATCH] slab: add transfer_objects() function
transfer_objects() can be used to transfer objects between various object caches of the slab allocator. It is currently only used during __cache_alloc() to retrieve elements from the shared array. We will be using it soon to transfer elements from the alien caches to the remote shared array. Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Pekka Enberg <penberg@cs.helsinki.fi> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--mm/slab.c42
1 files changed, 28 insertions, 14 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 6a3760e0353c..dee857a8680b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -898,6 +898,30 @@ static struct array_cache *alloc_arraycache(int node, int entries,
898 return nc; 898 return nc;
899} 899}
900 900
901/*
902 * Transfer objects in one arraycache to another.
903 * Locking must be handled by the caller.
904 *
905 * Return the number of entries transferred.
906 */
907static int transfer_objects(struct array_cache *to,
908 struct array_cache *from, unsigned int max)
909{
910 /* Figure out how many entries to transfer */
911 int nr = min(min(from->avail, max), to->limit - to->avail);
912
913 if (!nr)
914 return 0;
915
916 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
917 sizeof(void *) *nr);
918
919 from->avail -= nr;
920 to->avail += nr;
921 to->touched = 1;
922 return nr;
923}
924
901#ifdef CONFIG_NUMA 925#ifdef CONFIG_NUMA
902static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); 926static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
903static void *alternate_node_alloc(struct kmem_cache *, gfp_t); 927static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
@@ -2680,20 +2704,10 @@ retry:
2680 BUG_ON(ac->avail > 0 || !l3); 2704 BUG_ON(ac->avail > 0 || !l3);
2681 spin_lock(&l3->list_lock); 2705 spin_lock(&l3->list_lock);
2682 2706
2683 if (l3->shared) { 2707 /* See if we can refill from the shared array */
2684 struct array_cache *shared_array = l3->shared; 2708 if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2685 if (shared_array->avail) { 2709 goto alloc_done;
2686 if (batchcount > shared_array->avail) 2710
2687 batchcount = shared_array->avail;
2688 shared_array->avail -= batchcount;
2689 ac->avail = batchcount;
2690 memcpy(ac->entry,
2691 &(shared_array->entry[shared_array->avail]),
2692 sizeof(void *) * batchcount);
2693 shared_array->touched = 1;
2694 goto alloc_done;
2695 }
2696 }
2697 while (batchcount > 0) { 2711 while (batchcount > 0) {
2698 struct list_head *entry; 2712 struct list_head *entry;
2699 struct slab *slabp; 2713 struct slab *slabp;