 Documentation/vm/slub.txt |  4 ++--
 include/linux/slab_def.h  |  4 ++--
 mm/slab.c                 |  9 ++++-----
 mm/slub.c                 | 13 ++++++++-----
 4 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index dcf8bcf846d6..7c13f22a0c9e 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -50,14 +50,14 @@ F.e. in order to boot just with sanity checks and red zoning one would specify:
 
 Trying to find an issue in the dentry cache? Try
 
-	slub_debug=,dentry_cache
+	slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab. We can just apply sanity checks
 to the dentry cache with
 
-	slub_debug=F,dentry_cache
+	slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
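
Worth noting (our addition, not part of the patch): slub_debug matches the name a cache was registered under, which is also its directory name under /sys/kernel/slab/, and the dentry cache is created as "dentry", not "dentry_cache". Following the flag letters shown above, sanity checks and red zoning could both be confined to that one cache with:

	slub_debug=FZ,dentry
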
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fcc48096ee64..39c3a5eb8ebe 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -41,7 +41,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	{
 		extern void __you_cannot_kmalloc_that_much(void);
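
A short aside from us on the include style: the preprocessor looks for a quoted include in the directory of the including file before trying the search path, so "kmalloc_sizes.h" resolved only because slab_def.h itself sits in include/linux/. The bracketed spelling names the location explicitly and matches how every other kernel header is pulled in:

	#include "kmalloc_sizes.h"        /* found relative to the including file */
	#include <linux/kmalloc_sizes.h>  /* found via the compiler's include search path */
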
diff --git a/mm/slab.c b/mm/slab.c
index 473e6c2eaefb..e6c698f55674 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -333,7 +333,7 @@ static __always_inline int index_of(const size_t size)
 			return i; \
 		else \
 			i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 	__bad_size();
 	} else
@@ -2964,11 +2964,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
+retry:
 	check_irq_off();
+	node = numa_node_id();
 	ac = cpu_cache_get(cachep);
-retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
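
Our reading of why the retry label moves up: the grow path further down can re-enable interrupts and sleep, so when control jumps back the task may be running on a different CPU or NUMA node, and both node and ac would be stale. With the label hoisted, every pass recomputes them. A compilable toy of that pattern, with stand-in helpers (none of these are kernel APIs):

	#include <stdbool.h>

	static int pool;	/* stand-in for the per-node pool of free objects */

	static int current_node(void) { return 0; }	/* may change between passes */
	static bool refill_from(int node) { (void)node; return pool-- > 0; }
	static void grow(int node) { (void)node; pool = 16; /* may sleep here */ }

	void refill(void)
	{
		for (;;) {
			int node = current_node();	/* recomputed on every pass */
			if (refill_from(node))
				break;
			grow(node);	/* can block; we may wake on another CPU */
		}
	}
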
@@ -3280,7 +3279,7 @@ retry:
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 	kmem_flagcheck(cache, flags);
-	obj = kmem_getpages(cache, flags, -1);
+	obj = kmem_getpages(cache, local_flags, -1);
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	if (obj) {
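
A hedged note on the kmem_getpages() fix: earlier in fallback_alloc() the function derives local_flags from flags by masking off bits that must not apply here, and it is local_flags that decided whether interrupts were re-enabled above. Passing the raw flags to the allocation meant a constraint such as __GFP_THISNODE could leak into a call that deliberately passes nodeid -1 (allocate from wherever memory can be found), undermining the fallback. The exact mask sits outside this hunk, so we have not quoted it.
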
diff --git a/mm/slub.c b/mm/slub.c
index 0863fd38a5ce..96d63eb3ab17 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
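
Why the test moves from c->freelist to page->freelist (our summary of SLUB's two freelists; the patch itself does not explain it):

	/*
	 * c->page          the slab this CPU is currently allocating from
	 * c->freelist      objects freed back on this CPU's lockless fast path
	 * page->freelist   objects freed into the slab by other CPUs
	 */

Only the page freelist receives frees from other CPUs, so it, not the cpu freelist, is the evidence that DEACTIVATE_REMOTE_FREES should count when the slab is taken out of service.
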
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
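
A standalone way to check what the new loop computes (our sketch: a 64-byte cache line and an 8-byte ARCH_SLAB_MINALIGN are assumed, and the kernel helpers are replaced with plain C):

	#include <stdio.h>

	#define CACHE_LINE	64UL	/* stand-in for cache_line_size() */
	#define MINALIGN	8UL	/* stand-in for ARCH_SLAB_MINALIGN */

	static unsigned long align_up(unsigned long x, unsigned long a)
	{
		return (x + a - 1) & ~(a - 1);	/* what the kernel's ALIGN() does */
	}

	static unsigned long calc_align(int hwcache_align, unsigned long align,
					unsigned long size)
	{
		if (hwcache_align) {
			unsigned long ralign = CACHE_LINE;
			/* halve the alignment while an object still fits in half of it */
			while (size <= ralign / 2)
				ralign /= 2;
			if (ralign > align)
				align = ralign;
		}
		if (align < MINALIGN)
			align = MINALIGN;
		return align_up(align, sizeof(void *));
	}

	int main(void)
	{
		printf("size 12  -> align %lu\n", calc_align(1, 8, 12));	/* 16 */
		printf("size 100 -> align %lu\n", calc_align(1, 8, 100));	/* 64 */
		return 0;
	}

The practical effect: the old code either jumped straight to full cache-line alignment (size above half a line) or left the requested alignment untouched, while the new loop rounds small objects to the largest power-of-two fraction of a cache line that still holds one object, so several of them can share a line at a sensible alignment.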