Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	46	++++++++++++++++++++++++++--------------------
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe71ea2..e46e3129697d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,14 +157,6 @@
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
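The ARCH_KMALLOC_MINALIGN and ARCH_SLAB_MINALIGN fallbacks are dropped from mm/slub.c; by this point the defaults apparently live in the shared slab headers instead (include/linux/slub_def.h is the likely home, an assumption here), so the allocator core no longer needs its own copy. The idiom itself is a weak default: an architecture that needs stricter alignment defines the macro in its own headers, and the generic header falls back to the alignment of the largest scalar type, roughly:

/* sketch of the relocated fallbacks; the exact header is an assumption */
#ifndef ARCH_KMALLOC_MINALIGN
/* kmalloc()ed memory must be able to hold any scalar type */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif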
@@ -1084,7 +1076,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
-		return alloc_pages_node(node, flags, order);
+		return alloc_pages_exact_node(node, flags, order);
 }
 
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
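alloc_slab_page() has already routed node == -1 to plain alloc_pages(), so by the time the else branch runs the node id is known. alloc_pages_node() would re-check for an unknown node; alloc_pages_exact_node() skips that branch and merely asserts validity. The two helpers, roughly as they appear in include/linux/gfp.h around this time:

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}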
@@ -2429,9 +2421,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	DECLARE_BITMAP(map, page->objects);
+	long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
+			    GFP_ATOMIC);
 
-	bitmap_zero(map, page->objects);
+	if (!map)
+		return;
 	slab_err(s, page, "%s", text);
 	slab_lock(page);
 	for_each_free_object(p, s, page->freelist)
@@ -2446,6 +2440,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	slab_unlock(page);
+	kfree(map);
 #endif
 }
 
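The point of these two hunks: DECLARE_BITMAP(map, page->objects) declares a variable-length array on the kernel stack, and since page->objects can reach MAX_OBJS_PER_PAGE (65535), the bitmap can grow to 8KB, the size of an entire kernel stack on common x86_64 configurations. kzalloc() moves that cost to the heap; GFP_ATOMIC suits this path because it can be reached with the node's list_lock held, and on allocation failure the debug listing is simply skipped. A back-of-the-envelope check of the sizing (standalone C, illustrative only):

#include <stdio.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long objects = 65535;	/* MAX_OBJS_PER_PAGE */

	/* what DECLARE_BITMAP(map, objects) would have put on the stack */
	printf("bitmap size: %zu bytes\n",
	       (size_t)(BITS_TO_LONGS(objects) * sizeof(long)));	/* 8192 on LP64 */
	return 0;
}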
@@ -3338,8 +3333,15 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, gfpflags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, gfpflags, node);
+
+		trace_kmalloc_node(caller, ret,
+				   size, PAGE_SIZE << get_order(size),
+				   gfpflags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, gfpflags);
 
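kmalloc_large_node() goes straight to the page allocator, so the old early return never hit the kmalloc tracepoint and large node-local allocations were invisible to the tracing machinery. The added trace_kmalloc_node() call records both the requested size and the actual rounded-up cost, PAGE_SIZE << get_order(size). The rounding it reports, demonstrated with a userspace stand-in for get_order() (illustrative, not the kernel's implementation):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* stand-in with the same contract as the kernel's get_order():
 * smallest order such that PAGE_SIZE << order covers size */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 10000;	/* just over two 4K pages */

	/* bytes_req vs. bytes_alloc, as the tracepoint records them */
	printf("requested %lu, allocated %lu\n",
	       size, PAGE_SIZE << get_order(size));	/* 10000 vs 16384 */
	return 0;
}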
@@ -3651,10 +3653,10 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-		struct page *page, enum track_item alloc)
+		struct page *page, enum track_item alloc,
+		long *map)
 {
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, page->objects);
 	void *p;
 
 	bitmap_zero(map, page->objects);
@@ -3673,11 +3675,14 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
+	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
+				     sizeof(unsigned long), GFP_KERNEL);
 
-	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-			GFP_TEMPORARY))
+	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+				     GFP_TEMPORARY)) {
+		kfree(map);
 		return sprintf(buf, "Out of memory\n");
-
+	}
 	/* Push back cpu slabs */
 	flush_all(s);
 
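Merging the bitmap check into the alloc_loc_track() test gives both failures one exit path. Two idioms make this safe: || short-circuits, so alloc_loc_track() is never attempted when the map allocation failed, and kfree(NULL) is defined as a no-op, so the cleanup needs no second test. The shape of the idiom in a standalone sketch (alloc_tracker and example are invented names):

#include <stdlib.h>

struct tracker { void *buf; };

/* stand-in for alloc_loc_track(): a second allocation that can fail */
static int alloc_tracker(struct tracker *t, size_t n)
{
	t->buf = malloc(n);
	return t->buf != NULL;
}

static int example(size_t map_size)
{
	struct tracker t = { NULL };
	long *map = calloc(1, map_size);	/* kzalloc() stand-in */

	/* || short-circuits: the tracker is never set up when the map
	 * failed; free(NULL), like kfree(NULL), is a no-op, so one
	 * cleanup line covers both failure modes. */
	if (!map || !alloc_tracker(&t, 4096)) {
		free(map);
		return -1;
	}

	/* ... use t and map ... */
	free(t.buf);
	free(map);
	return 0;
}

int main(void)
{
	return example(1024) ? 1 : 0;
}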
@@ -3691,9 +3696,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
-			process_slab(&t, s, page, alloc);
+			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
-			process_slab(&t, s, page, alloc);
+			process_slab(&t, s, page, alloc, map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
@@ -3744,6 +3749,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	}
 
 	free_loc_track(&t);
+	kfree(map);
 	if (!t.count)
 		len += sprintf(buf, "No data\n");
 	return len;
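Hoisting the bitmap out of process_slab() means list_locations() allocates one buffer sized for the worst case, BITS_TO_LONGS(oo_objects(s->max)) longs, and lends it to every process_slab() call across every node; each call re-clears it with bitmap_zero() before scanning a page. Stack usage becomes constant regardless of slab size, at the cost of one extra parameter. The borrow-a-scratch-buffer pattern in miniature (standalone sketch; scan_one and scan_all are invented analogues):

#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* borrows the caller's scratch bitmap, like process_slab() above */
static void scan_one(unsigned long *map, size_t nbits)
{
	memset(map, 0, BITS_TO_LONGS(nbits) * sizeof(long));	/* bitmap_zero() */
	/* ... mark free objects in map, then walk the allocated ones ... */
}

/* sizes the map once for the worst case, like list_locations() */
static int scan_all(size_t max_objects, size_t npages)
{
	unsigned long *map = malloc(BITS_TO_LONGS(max_objects) * sizeof(long));
	size_t i;

	if (!map)
		return -1;
	for (i = 0; i < npages; i++)
		scan_one(map, max_objects);	/* reused, never reallocated */
	free(map);
	return 0;
}

int main(void)
{
	return scan_all(65535, 4) ? 1 : 0;
}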