aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2008-03-05 17:05:56 -0500
committerChristoph Lameter <clameter@sgi.com>2008-03-06 19:21:50 -0500
commitb6210386787728b84db25adc4f1eba70440a4c73 (patch)
tree22702da1d1168c50cd56044be6cf68a0c093471e
parent1c61fc40fc264059ff41a614ed2d899127288281 (diff)
slub: Do not cross cacheline boundaries for very small objects
SLUB should pack even small objects nicely into cachelines if that is what has been asked for. Use the same algorithm as SLAB for this. The effect of this patch for a system with a cacheline size of 64 bytes is that the 24 byte sized slab caches will now put exactly 2 objects into a cacheline instead of 3 with some overlap into the next cacheline. This reduces the object density in a 4k slab from 170 to 128 objects (same as SLAB). Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Christoph Lameter <clameter@sgi.com>
-rw-r--r--mm/slub.c11
1 files changed, 7 insertions, 4 deletions
diff --git a/mm/slub.c b/mm/slub.c
index a96e11c77fd9..96d63eb3ab17 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}

 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;

 	return ALIGN(align, sizeof(void *));
 }