path: root/include/linux/slub_def.h
author    Pekka Enberg <penberg@cs.helsinki.fi>    2009-02-20 05:21:33 -0500
committer Pekka Enberg <penberg@cs.helsinki.fi>    2009-02-20 05:25:47 -0500
commit    51735a7ca67531267a27b57e5fe20f7815192f9c (patch)
tree      35c2f72b18b168f8f9c8d7f7b1172422f0a16238 /include/linux/slub_def.h
parent    ffadd4d0feb5376c82dc3a4104731b7ce2794edc (diff)
SLUB: Do not pass 8k objects through to the page allocator
Increase the maximum object size in SLUB so that 8k objects are no longer passed through to the page allocator. The network stack uses 8k objects for performance-critical operations.

The patch is motivated by a SLAB vs. SLUB regression in the netperf benchmark. The problem is that the kfree(skb->head) call in skb_release_data() is subject to page allocator pass-through, because the size passed to __alloc_skb() is larger than 4 KB in this test.

As explained by Yanmin Zhang:

  I use the 2.6.29-rc2 kernel to run netperf UDP-U-4k CPU_NUM client/server pair loopback testing on x86-64 machines. Compared with SLUB, SLAB's result is about 2.3 times SLUB's. After applying the reverting patch, the result difference between SLUB and SLAB becomes 1%, which we might consider as fluctuation.

[ penberg@cs.helsinki.fi: fix oops in kmalloc() ]

Reported-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Tested-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
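For context, the sketch below shows the pass-through threshold arithmetic the patch changes. It is a minimal user-space illustration, not kernel code: PAGE_SIZE is assumed to be 4096 (the common x86-64 configuration), and the helper passes_through() is a hypothetical stand-in for the size check on SLUB's kmalloc() fast path.

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096UL

    /* Before the patch: anything above one page bypasses SLUB. */
    #define SLUB_MAX_SIZE_OLD (PAGE_SIZE)
    /* After the patch: objects up to two pages stay in SLUB's kmalloc caches. */
    #define SLUB_MAX_SIZE_NEW (2 * PAGE_SIZE)

    /* Illustrative helper: would an allocation of this size fall through
     * to the page allocator under the given limit? */
    static bool passes_through(unsigned long size, unsigned long max)
    {
    	return size > max;
    }

    int main(void)
    {
    	/* The netperf UDP-U-4k case: skb->head allocations larger than 4 KB. */
    	unsigned long sizes[] = { 2048, 4096, 8192 };

    	for (int i = 0; i < 3; i++)
    		printf("size %5lu: old limit -> %s, new limit -> %s\n",
    		       sizes[i],
    		       passes_through(sizes[i], SLUB_MAX_SIZE_OLD) ?
    				"page allocator" : "SLUB",
    		       passes_through(sizes[i], SLUB_MAX_SIZE_NEW) ?
    				"page allocator" : "SLUB");
    	return 0;
    }

Under the old limit the 8192-byte case goes to the page allocator; under the new limit it is served from a SLUB kmalloc cache, which is the behavior the benchmark regression motivated.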
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--  include/linux/slub_def.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 986e09dcfd8f..e217a7a68ea7 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -129,9 +129,9 @@ struct kmem_cache {
  * This should be dropped to PAGE_SIZE / 2 once the page allocator
  * "fastpath" becomes competitive with the slab allocator fastpaths.
  */
-#define SLUB_MAX_SIZE (PAGE_SIZE)
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
 
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
 /*
  * We keep the general caches in an array of slab caches that are used for
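Why SLUB_PAGE_SHIFT moves in lockstep with SLUB_MAX_SIZE: as the trailing comment says, the general kmalloc caches are kept in an array indexed by the power-of-two order of the object size, so raising the largest cached size to 2 * PAGE_SIZE requires one more array slot. The sketch below is a minimal illustration of that index arithmetic, assuming 4 KB pages; the kernel's kmalloc_index() also covers the small non-power-of-two caches, which this sketch omits.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)

    /* Illustrative power-of-two index for an object size: the smallest n
     * such that (1 << n) >= size. */
    static int size_index(unsigned long size)
    {
    	int n = 0;

    	while ((1UL << n) < size)
    		n++;
    	return n;
    }

    int main(void)
    {
    	/* Largest object cached before and after the patch. */
    	printf("index(PAGE_SIZE)     = %d -> needs PAGE_SHIFT + 1 = %d slots\n",
    	       size_index(PAGE_SIZE), PAGE_SHIFT + 1);
    	printf("index(2 * PAGE_SIZE) = %d -> needs PAGE_SHIFT + 2 = %d slots\n",
    	       size_index(2 * PAGE_SIZE), PAGE_SHIFT + 2);
    	return 0;
    }

With 4 KB pages, a one-page object lands at index 12 (so PAGE_SHIFT + 1 entries suffice), while a two-page object lands at index 13, which is why the array bound grows from PAGE_SHIFT + 1 to PAGE_SHIFT + 2 in this hunk.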