Diffstat (limited to 'mm/slub.c')
 -rw-r--r--  mm/slub.c  52  +++++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 21 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe71ea2..26f0cb9cc584 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,14 +157,6 @@
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
@@ -1084,7 +1076,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
-		return alloc_pages_node(node, flags, order);
+		return alloc_pages_exact_node(node, flags, order);
 }
 
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1368,6 +1360,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
+	get_mems_allowed();
 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		struct kmem_cache_node *n;
@@ -1377,10 +1370,13 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
-			if (page)
+			if (page) {
+				put_mems_allowed();
 				return page;
+			}
 		}
 	}
+	put_mems_allowed();
 #endif
 	return NULL;
 }
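
Note on the two hunks above: get_mems_allowed()/put_mems_allowed() bracket the whole zonelist walk, so the early return inside the loop grows braces to release before returning, and the fall-through path releases just before the #endif. Below is a minimal userspace sketch of that release-on-every-exit shape; a pthread mutex stands in for the cpuset primitives, and find_partial() with its node array is an invented stand-in for the real walk.

/* sketch: every exit path from the bracketed region must release */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

static pthread_mutex_t mems_lock = PTHREAD_MUTEX_INITIALIZER;

static void *find_partial(void *nodes[], size_t n)
{
	pthread_mutex_lock(&mems_lock);		/* get_mems_allowed() */
	for (size_t i = 0; i < n; i++) {
		if (nodes[i]) {
			/* early exit: release before returning */
			pthread_mutex_unlock(&mems_lock);
			return nodes[i];
		}
	}
	pthread_mutex_unlock(&mems_lock);	/* put_mems_allowed() */
	return NULL;
}

int main(void)
{
	int obj = 42;
	void *nodes[] = { NULL, &obj, NULL };

	printf("found: %p\n", find_partial(nodes, 3));
	return 0;
}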
@@ -2429,9 +2425,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	DECLARE_BITMAP(map, page->objects);
+	long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
+			    GFP_ATOMIC);
 
-	bitmap_zero(map, page->objects);
+	if (!map)
+		return;
 	slab_err(s, page, "%s", text);
 	slab_lock(page);
 	for_each_free_object(p, s, page->freelist)
@@ -2446,6 +2444,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	slab_unlock(page);
+	kfree(map);
 #endif
 }
 
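The pair of hunks above swaps a variable-length on-stack bitmap (DECLARE_BITMAP sized by page->objects) for a kzalloc'd one that is freed after the scan; with large objects-per-slab counts the stack variant risks overflow. A small userspace analogue of the sizing arithmetic follows, with BITS_TO_LONGS re-defined locally for illustration and calloc() playing the role of kzalloc().

/* sketch: heap bitmap sized in longs, zeroed at allocation */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int objects = 300;	/* stand-in for page->objects */
	/* calloc zeroes the buffer, mirroring kzalloc() */
	unsigned long *map = calloc(BITS_TO_LONGS(objects), sizeof(long));

	if (!map)
		return 1;		/* allocation failure: give up */
	map[42 / BITS_PER_LONG] |= 1UL << (42 % BITS_PER_LONG);
	printf("bit 42: %lu\n",
	       (map[42 / BITS_PER_LONG] >> (42 % BITS_PER_LONG)) & 1);
	free(map);
	return 0;
}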
@@ -3338,8 +3337,15 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, gfpflags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, gfpflags, node);
+
+		trace_kmalloc_node(caller, ret,
+				   size, PAGE_SIZE << get_order(size),
+				   gfpflags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, gfpflags);
 
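Before this hunk, sizes above SLUB_MAX_SIZE returned from kmalloc_large_node() directly and never reached the tracepoint; capturing the pointer first lets trace_kmalloc_node() report the large path too, with the true allocated size rounded up to whole pages via PAGE_SIZE << get_order(size). A userspace sketch of the capture-then-trace shape; trace_alloc() and large_alloc() are invented stand-ins, and __builtin_return_address(0) (a GCC/Clang builtin) plays the role of the caller argument.

#include <stdio.h>
#include <stdlib.h>

/* invented stand-in for trace_kmalloc_node(): caller, result, sizes */
static void trace_alloc(const void *caller, const void *ret,
			size_t req, size_t alloc)
{
	fprintf(stderr, "alloc caller=%p ret=%p req=%zu alloc=%zu\n",
		caller, ret, req, alloc);
}

static void *large_alloc(size_t size)
{
	/* capture the result instead of returning it directly, so the
	 * tracepoint fires on this path too */
	void *ret = malloc(size);

	trace_alloc(__builtin_return_address(0), ret, size, size);
	return ret;
}

int main(void)
{
	free(large_alloc(1 << 20));
	return 0;
}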
@@ -3651,10 +3657,10 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-		struct page *page, enum track_item alloc)
+		struct page *page, enum track_item alloc,
+		long *map)
 {
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, page->objects);
 	void *p;
 
 	bitmap_zero(map, page->objects);
@@ -3673,11 +3679,14 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
+	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
+				     sizeof(unsigned long), GFP_KERNEL);
 
-	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-				GFP_TEMPORARY))
+	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+				     GFP_TEMPORARY)) {
+		kfree(map);
 		return sprintf(buf, "Out of memory\n");
-
+	}
 	/* Push back cpu slabs */
 	flush_all(s);
 
@@ -3691,9 +3700,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
-			process_slab(&t, s, page, alloc);
+			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
-			process_slab(&t, s, page, alloc);
+			process_slab(&t, s, page, alloc, map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
@@ -3744,6 +3753,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	}
 
 	free_loc_track(&t);
+	kfree(map);
 	if (!t.count)
 		len += sprintf(buf, "No data\n");
 	return len;
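
Taken together, the last four hunks move the per-page bitmap out of process_slab()'s stack frame: list_locations() allocates one map sized for the worst case (oo_objects(s->max)), lends it to every process_slab() call under the list lock, and frees it alongside the location track. A minimal userspace sketch of that hoisted-scratch-buffer pattern; process_page() and max_objects are invented names, not the kernel's.

/* sketch: one worst-case buffer in the caller, reused per page */
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void process_page(unsigned int objects, unsigned long *map)
{
	/* like bitmap_zero(): the worker re-zeroes the borrowed buffer */
	memset(map, 0, BITS_TO_LONGS(objects) * sizeof(long));
	/* ... mark free objects, then scan the allocated ones ... */
}

int main(void)
{
	unsigned int max_objects = 512;	/* like oo_objects(s->max) */
	unsigned long *map = malloc(BITS_TO_LONGS(max_objects) * sizeof(long));

	if (!map)
		return 1;
	for (unsigned int objs = 64; objs <= max_objects; objs *= 2)
		process_page(objs, map);	/* one buffer, many pages */
	free(map);
	return 0;
}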