author    Pekka Enberg <penberg@kernel.org>	2012-10-03 02:56:12 -0400
committer Pekka Enberg <penberg@kernel.org>	2012-10-03 02:56:12 -0400
commit    023dc70470502f41b285112d4840f35d9075b767 (patch)
tree      f2f06d54be9583d9b1b2abae4c76722c5453df83 /mm/slub.c
parent    a0d271cbfed1dd50278c6b06bead3d00ba0a88f9 (diff)
parent    608da7e3fc7259eca0d983b31bc8915af14cf15e (diff)
Merge branch 'slab/next' into slab/for-linus
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 63
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2fdd96f9e998..97a49d9a37cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -568,6 +568,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
 	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
 	printk(KERN_ERR "----------------------------------------"
 			"-------------------------------------\n\n");
+
+	add_taint(TAINT_BAD_PAGE);
 }
 
 static void slab_fix(struct kmem_cache *s, char *fmt, ...)
@@ -1069,13 +1071,13 @@ bad:
 	return 0;
 }
 
-static noinline int free_debug_processing(struct kmem_cache *s,
-		struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags)
 {
-	unsigned long flags;
-	int rc = 0;
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1113,15 +1115,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
 	init_object(s, object, SLUB_RED_INACTIVE);
-	rc = 1;
 out:
 	slab_unlock(page);
-	local_irq_restore(flags);
-	return rc;
+	/*
+	 * Keep node_lock to preserve integrity
+	 * until the object is actually freed
+	 */
+	return n;
 
 fail:
+	slab_unlock(page);
+	spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
-	goto out;
+	return NULL;
 }
 
 static int __init setup_slub_debug(char *str)
@@ -1214,8 +1220,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
-static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1714,7 +1721,7 @@ static inline void note_cmpxchg_failure(const char *n,
 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-void init_kmem_cache_cpus(struct kmem_cache *s)
+static void init_kmem_cache_cpus(struct kmem_cache *s)
 {
 	int cpu;
 
@@ -1939,7 +1946,7 @@ static void unfreeze_partials(struct kmem_cache *s)
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
 	struct page *oldpage;
 	int pages;
@@ -1962,6 +1969,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			local_irq_save(flags);
 			unfreeze_partials(s);
 			local_irq_restore(flags);
+			oldpage = NULL;
 			pobjects = 0;
 			pages = 0;
 			stat(s, CPU_PARTIAL_DRAIN);
@@ -2310,7 +2318,7 @@ new_slab:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
@@ -2380,9 +2388,15 @@ redo:
 	return object;
 }
 
+static __always_inline void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, unsigned long addr)
+{
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
@@ -2393,7 +2407,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	return ret;
 }
@@ -2411,7 +2425,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -2425,7 +2439,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -2457,7 +2471,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	stat(s, FREE_SLOWPATH);
 
-	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+	if (kmem_cache_debug(s) &&
+		!(n = free_debug_processing(s, page, x, addr, &flags)))
 		return;
 
 	do {
@@ -3362,7 +3377,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
+	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3405,7 +3420,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -3482,7 +3497,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kmemleak_free(x);
-		put_page(page);
+		__free_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
@@ -4033,7 +4048,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
+	ret = slab_alloc(s, gfpflags, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4063,7 +4078,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
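
Note on the free_debug_processing() change above: on success the helper now returns the slab's kmem_cache_node with n->list_lock held (taken via spin_lock_irqsave() inside the helper), and the caller is expected to drop that lock once the object has actually been freed; on failure the helper unlocks and returns NULL. The following is a minimal caller-side sketch, not the verbatim kernel code; the function name slow_path_free_sketch and the placement of the unlock are illustrative assumptions.

/*
 * Sketch only: illustrates the lock hand-off implied by the hunks above.
 */
static void slow_path_free_sketch(struct kmem_cache *s, struct page *page,
				  void *x, unsigned long addr)
{
	struct kmem_cache_node *n;
	unsigned long flags;

	if (kmem_cache_debug(s) &&
	    !(n = free_debug_processing(s, page, x, addr, &flags)))
		return;	/* debug checks failed; helper already unlocked */

	/* ... detach the object while n->list_lock keeps the lists stable ... */

	spin_unlock_irqrestore(&n->list_lock, flags);
}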