about · summary · refs · log · tree · commit · diff · stats
path: root/mm/slub.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 13
1 files changed, 8 insertions, 5 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 0863fd38a5ce..96d63eb3ab17 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }