Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  142
1 file changed, 56 insertions(+), 86 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 51df8272cfaf..c3eb3d3ca835 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 {
 	slab_lock(page);
-	if (page->freelist == freelist_old && page->counters == counters_old) {
+	if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 		page->freelist = freelist_new;
 		page->counters = counters_new;
 		slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 	local_irq_save(flags);
 	slab_lock(page);
-	if (page->freelist == freelist_old && page->counters == counters_old) {
+	if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 		page->freelist = freelist_new;
 		page->counters = counters_new;
 		slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 				object_err(s, page, object,
 					"Freechain corrupt");
 				set_freepointer(s, object, NULL);
-				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
 				page->freelist = NULL;
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+						struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1999,7 +2007,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2169,8 +2178,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2314,7 +2323,8 @@ new_slab:
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2372,7 +2382,7 @@ redo:
 
 	object = c->freelist;
 	page = c->page;
-	if (unlikely(!object || !page || !node_match(page, node)))
+	if (unlikely(!object || !node_match(page, node)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
@@ -2382,13 +2392,15 @@ redo:
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2420,7 +2432,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2434,14 +2447,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -2512,8 +2517,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		if (kmem_cache_has_cpu_partial(s) && !prior)
 
 			/*
-			 * Slab was on no list before and will be partially empty
-			 * We can defer the list move and instead freeze it.
+			 * Slab was on no list before and will be
+			 * partially empty
+			 * We can defer the list move and instead
+			 * freeze it.
 			 */
 			new.frozen = 1;
 
@@ -3071,8 +3078,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3099,8 +3106,8 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3316,42 +3323,6 @@ size_t ksize(const void *object)
 }
 EXPORT_SYMBOL(ksize);
 
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
-	struct page *page;
-	void *object = (void *)x;
-	unsigned long flags;
-	bool rv;
-
-	if (unlikely(ZERO_OR_NULL_PTR(x)))
-		return false;
-
-	local_irq_save(flags);
-
-	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
-		/* maybe it was from stack? */
-		rv = true;
-		goto out_unlock;
-	}
-
-	slab_lock(page);
-	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
-		rv = false;
-	} else {
-		rv = true;
-	}
-	slab_unlock(page);
-
-out_unlock:
-	local_irq_restore(flags);
-	return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
 void kfree(const void *x)
 {
 	struct page *page;
@@ -4162,15 +4133,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4268,18 +4241,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	int node;
 	int x;
 	unsigned long *nodes;
-	unsigned long *per_cpu;
 
-	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
 	if (!nodes)
 		return -ENOMEM;
-	per_cpu = nodes + nr_node_ids;
 
 	if (flags & SO_CPU) {
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4304,8 +4276,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				total += x;
 				nodes[node] += x;
 			}
-
-			per_cpu[node]++;
 		}
 	}
 
@@ -4315,12 +4285,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5136,7 +5105,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);