author	Christoph Lameter <clameter@sgi.com>	2007-05-10 06:15:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-10 12:26:52 -0400
commit	894b8788d7f265eb7c6f75a9a77cedeb48f51586 (patch)
tree	4b00fa4704090876895b8a7528c6fe5e2201fc28 /include/linux
parent	02b67325a6d34f2ae67484a8802b6ffc9ce9931d (diff)
slub: support concurrent local and remote frees and allocs on a slab
Avoid atomic overhead in slab_alloc and slab_free.

SLUB needs to take the slab_lock on the per-cpu slabs to synchronize with
potential kfree operations. This patch avoids that need by moving all free
objects onto a lockless_freelist. The regular freelist continues to exist
and will be used to free objects. So while we consume the lockless_freelist
the regular freelist may build up objects.

If we run out of objects on the lockless_freelist we check the regular
freelist. If it has objects we move those over to the lockless_freelist and
do this again. This yields a significant saving in the number of atomic
operations that have to be performed.

We can even free directly to the lockless_freelist if we know that we are
running on the same processor. So this speeds up short-lived objects: they
may be allocated and freed without taking the slab_lock. This is
particularly good for netperf.

In order to maximize the effect of the new faster hotpath we extract the
hottest performance pieces into inlined functions. These are then inlined
into kmem_cache_alloc and kmem_cache_free, so hotpath allocation and
freeing no longer require a subroutine call within SLUB.

[I am not sure that it is worth doing this because it changes the
easy-to-read structure of SLUB just to reduce atomic ops. However, there is
someone out there with a benchmark on 4-way and 8-way processor systems
that seems to show a 5% regression vs. SLAB, apparently due to SLUB's
increased use of atomic operations. I wonder if this is applicable or
discernible at all in a real workload?]

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
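For illustration, a minimal user-space sketch of the two-freelist scheme the
message describes: allocations drain a lockless freelist and refill it from
the regular (locked) freelist only when it runs dry, while local frees skip
the lock entirely. All names here (toy_slab, toy_alloc, toy_free, push, pop)
are hypothetical and the slab_lock is only indicated in comments; the real
fast paths are the inlined pieces of slab_alloc() and slab_free() in
mm/slub.c, which also handle per-cpu slab management and the slow paths.

/*
 * Toy model of a slab with a lockless freelist and a regular freelist.
 * Not the SLUB implementation; just the refill/free logic in isolation.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_slab {
	void **lockless_freelist;	/* consumed without taking any lock */
	void **freelist;		/* remote frees; needs the slab lock */
};

/* Every free object stores the pointer to the next free object in its
 * first word, so a freelist is a chain threaded through the objects. */
static void push(void ***head, void *object)
{
	*(void **)object = *head;
	*head = object;
}

static void *pop(void ***head)
{
	void *object = *head;

	if (object)
		*head = *(void **)object;
	return object;
}

/* Allocation fast path: take from the lockless freelist.  Only when it is
 * empty do we (under the slab lock, in the real code) pull over whatever
 * the regular freelist has accumulated and retry. */
static void *toy_alloc(struct toy_slab *s)
{
	void *object = pop(&s->lockless_freelist);

	if (!object) {
		/* slab_lock would be taken here */
		s->lockless_freelist = s->freelist;
		s->freelist = NULL;
		/* slab_lock would be released here */
		object = pop(&s->lockless_freelist);
	}
	return object;
}

/* A free on the processor that owns the slab goes straight back to the
 * lockless freelist; a remote free must use the locked regular freelist. */
static void toy_free(struct toy_slab *s, void *object, bool same_cpu)
{
	if (same_cpu)
		push(&s->lockless_freelist, object);
	else
		push(&s->freelist, object);	/* locked path in SLUB */
}

int main(void)
{
	void *storage[4];		/* stand-ins for objects in a slab */
	struct toy_slab s = { NULL, NULL };
	int i;

	for (i = 0; i < 4; i++)
		push(&s.freelist, &storage[i]);

	void *a = toy_alloc(&s);	/* refills the lockless freelist once */
	void *b = toy_alloc(&s);	/* pure lockless fast path */
	toy_free(&s, b, true);		/* local free: no lock needed */
	toy_free(&s, a, false);		/* remote free: locked path */
	printf("allocated %p and %p, then freed them\n", a, b);
	return 0;
}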
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mm_types.h	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e30687bad075..d5bb1796e12b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -50,13 +50,16 @@ struct page {
 	    spinlock_t ptl;
 #endif
 	    struct {			/* SLUB uses */
-		struct page *first_page;	/* Compound pages */
+		void **lockless_freelist;
 		struct kmem_cache *slab;	/* Pointer to slab */
 	    };
+	    struct {
+		struct page *first_page;	/* Compound pages */
+	    };
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
-		void *freelist;		/* SLUB: pointer to free object */
+		void *freelist;		/* SLUB: freelist req. slab lock */
 	};
 	struct list_head lru;		/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !