diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-03-25 06:06:44 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-25 11:22:49 -0500 |
commit | 3ded175a4b7a4548f3358dcf5f3ad65f63cdb4ed (patch) | |
tree | c27a3da70d638bcf32f75c28d1da3f1eb18cb617 /mm/slab.c | |
parent | c5e3b83e97be4e09961c0af101644643e5d03d17 (diff) |
[PATCH] slab: add transfer_objects() function
transfer_objects() can be used to transfer objects between various object
caches of the slab allocator. It is currently only used during
__cache_alloc() to retrieve elements from the shared array. We will be
using it soon to transfer elements from the alien caches to the remote
shared array.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 42 |
1 files changed, 28 insertions, 14 deletions
@@ -898,6 +898,30 @@ static struct array_cache *alloc_arraycache(int node, int entries, | |||
898 | return nc; | 898 | return nc; |
899 | } | 899 | } |
900 | 900 | ||
901 | /* | ||
902 | * Transfer objects in one arraycache to another. | ||
903 | * Locking must be handled by the caller. | ||
904 | * | ||
905 | * Return the number of entries transferred. | ||
906 | */ | ||
907 | static int transfer_objects(struct array_cache *to, | ||
908 | struct array_cache *from, unsigned int max) | ||
909 | { | ||
910 | /* Figure out how many entries to transfer */ | ||
911 | int nr = min(min(from->avail, max), to->limit - to->avail); | ||
912 | |||
913 | if (!nr) | ||
914 | return 0; | ||
915 | |||
916 | memcpy(to->entry + to->avail, from->entry + from->avail -nr, | ||
917 | sizeof(void *) *nr); | ||
918 | |||
919 | from->avail -= nr; | ||
920 | to->avail += nr; | ||
921 | to->touched = 1; | ||
922 | return nr; | ||
923 | } | ||
924 | |||
901 | #ifdef CONFIG_NUMA | 925 | #ifdef CONFIG_NUMA |
902 | static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); | 926 | static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int); |
903 | static void *alternate_node_alloc(struct kmem_cache *, gfp_t); | 927 | static void *alternate_node_alloc(struct kmem_cache *, gfp_t); |
@@ -2680,20 +2704,10 @@ retry: | |||
2680 | BUG_ON(ac->avail > 0 || !l3); | 2704 | BUG_ON(ac->avail > 0 || !l3); |
2681 | spin_lock(&l3->list_lock); | 2705 | spin_lock(&l3->list_lock); |
2682 | 2706 | ||
2683 | if (l3->shared) { | 2707 | /* See if we can refill from the shared array */ |
2684 | struct array_cache *shared_array = l3->shared; | 2708 | if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) |
2685 | if (shared_array->avail) { | 2709 | goto alloc_done; |
2686 | if (batchcount > shared_array->avail) | 2710 | |
2687 | batchcount = shared_array->avail; | ||
2688 | shared_array->avail -= batchcount; | ||
2689 | ac->avail = batchcount; | ||
2690 | memcpy(ac->entry, | ||
2691 | &(shared_array->entry[shared_array->avail]), | ||
2692 | sizeof(void *) * batchcount); | ||
2693 | shared_array->touched = 1; | ||
2694 | goto alloc_done; | ||
2695 | } | ||
2696 | } | ||
2697 | while (batchcount > 0) { | 2711 | while (batchcount > 0) { |
2698 | struct list_head *entry; | 2712 | struct list_head *entry; |
2699 | struct slab *slabp; | 2713 | struct slab *slabp; |