Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	51
1 file changed, 23 insertions(+), 28 deletions(-)
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
 	if (slab_state < UP)
 		return;
 
-	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache_node *n;
 		struct kmem_cache *cache = kmalloc_caches[i];
 
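Note: kmalloc_caches[] is indexed by size class up to and including KMALLOC_SHIFT_HIGH, which is why the loop bound becomes "i <= KMALLOC_SHIFT_HIGH". A minimal sketch of the declaration this relies on (quoted from the slab headers of this era, not from this patch, so treat it as an assumption):

	/* one kmalloc cache per size index, indices 0..KMALLOC_SHIFT_HIGH */
	extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];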
@@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
+static inline int slabs_tofree(struct kmem_cache *cachep,
+						struct kmem_cache_node *n)
+{
+	return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
 static void __cpuinit cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
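Note: drain_freelist() takes the number of slabs to drain, while n->free_objects counts free objects, so the new slabs_tofree() helper converts between the two by rounding up. A standalone, illustrative-only sketch of the same arithmetic (names here are hypothetical, not part of the patch):

	#include <stdio.h>

	/* Ceiling division, as in slabs_tofree(): the number of slabs needed
	 * to hold free_objects objects when each slab holds num objects. */
	static int slabs_needed(int free_objects, int num)
	{
		return (free_objects + num - 1) / num;
	}

	int main(void)
	{
		/* 10 free objects at 4 objects per slab span at most 3 slabs */
		printf("%d\n", slabs_needed(10, 4));	/* prints 3 */
		return 0;
	}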
@@ -1241,7 +1247,7 @@ free_array_cache:
 		n = cachep->node[node];
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 	}
 }
 
@@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -2532,7 +2538,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
 		ret += !list_empty(&n->slabs_full) ||
 			!list_empty(&n->slabs_partial);
@@ -3338,18 +3344,6 @@ done:
 	return obj;
 }
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 		   unsigned long caller)
@@ -3643,6 +3637,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -4431,20 +4436,10 @@ static int leaks_show(struct seq_file *m, void *p)
 	return 0;
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-	mutex_unlock(&slab_mutex);
-}
-
 static const struct seq_operations slabstats_op = {
 	.start = leaks_start,
-	.next = s_next,
-	.stop = s_stop,
+	.next = slab_next,
+	.stop = slab_stop,
 	.show = leaks_show,
 };
 
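Note: the removed s_next()/s_stop() locals are replaced by shared seq_file helpers. Assuming slab_next() and slab_stop() are the common implementations provided outside this file (e.g. in mm/slab_common.c, not visible in this diff), they would read essentially like the code being deleted:

	/* Sketch of the shared helpers this hunk switches to; the bodies are
	 * assumed to mirror the removed local versions. */
	void *slab_next(struct seq_file *m, void *p, loff_t *pos)
	{
		return seq_list_next(p, &slab_caches, pos);	/* advance to next cache */
	}

	void slab_stop(struct seq_file *m, void *p)
	{
		mutex_unlock(&slab_mutex);	/* paired with the lock taken in start() */
	}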