 mm/slab.c | 66 ++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 32 insertions(+), 34 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -288,8 +288,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
  * OTOH the cpuarrays can contain lots of objects,
  * which could lock up otherwise freeable slabs.
  */
-#define REAPTIMEOUT_CPUC	(2*HZ)
-#define REAPTIMEOUT_LIST3	(4*HZ)
+#define REAPTIMEOUT_AC		(2*HZ)
+#define REAPTIMEOUT_NODE	(4*HZ)
 
 #if STATS
 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
@@ -1084,7 +1084,7 @@ static int init_cache_node_node(int node)
 
 	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
-		 * Set up the size64 kmemlist for cpu before we can
+		 * Set up the kmem_cache_node for cpu before we can
 		 * begin anything. Make sure some other cpu on this
 		 * node has not already allocated this
 		 */
@@ -1093,12 +1093,12 @@ static int init_cache_node_node(int node)
 		if (!n)
 			return -ENOMEM;
 		kmem_cache_node_init(n);
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE +
+		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 
 		/*
-		 * The l3s don't come and go as CPUs come and
-		 * go.  slab_mutex is sufficient
+		 * The kmem_cache_nodes don't come and go as CPUs
+		 * come and go.  slab_mutex is sufficient
 		 * protection here.
 		 */
 		cachep->node[node] = n;
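A note on the next_reap arithmetic kept by this rename: folding the cache pointer, modulo the timeout, into the deadline staggers the caches' reap times across the whole interval, so cache_reap() does not have every cache fall due in the same tick. A minimal user-space sketch of that staggering (HZ and all values here are demo assumptions, not the kernel's):

#include <stdio.h>

#define HZ		 1000			/* demo tick rate */
#define REAPTIMEOUT_NODE (4 * HZ)		/* mirrors the renamed constant */

/* Stagger a deadline by hashing the object's address into
 * [0, REAPTIMEOUT_NODE), as slab does with the kmem_cache pointer. */
static unsigned long next_reap(unsigned long now, const void *cachep)
{
	return now + REAPTIMEOUT_NODE +
	       (unsigned long)cachep % REAPTIMEOUT_NODE;
}

int main(void)
{
	int a, b;	/* two stand-ins for distinct kmem_cache objects */

	printf("cache a reaps at %lu\n", next_reap(0, &a));
	printf("cache b reaps at %lu\n", next_reap(0, &b));
	return 0;
}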
@@ -1423,8 +1423,8 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
 	for_each_online_node(node) {
 		cachep->node[node] = &init_kmem_cache_node[index + node];
 		cachep->node[node]->next_reap = jiffies +
-		    REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		    REAPTIMEOUT_NODE +
+		    ((unsigned long)cachep) % REAPTIMEOUT_NODE;
 	}
 }
 
@@ -2124,8 +2124,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		}
 	}
 	cachep->node[numa_mem_id()]->next_reap =
-			jiffies + REAPTIMEOUT_LIST3 +
-			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+			jiffies + REAPTIMEOUT_NODE +
+			((unsigned long)cachep) % REAPTIMEOUT_NODE;
 
 	cpu_cache_get(cachep)->avail = 0;
 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
@@ -2327,10 +2327,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & CFLGS_OFF_SLAB) {
 		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
 		/*
-		 * This is a possibility for one of the malloc_sizes caches.
+		 * This is a possibility for one of the kmalloc_{dma,}_caches.
 		 * But since we go off slab only for object size greater than
-		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
-		 * this should not happen at all.
+		 * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created
+		 * in ascending order, this should not happen at all.
 		 * But leave a BUG_ON for some lucky dude.
 		 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache));
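The comment's argument is an invariant worth spelling out: off-slab management is used only for objects above PAGE_SIZE/8, yet the freelist needed to manage a slab of such objects is far smaller than one object, so the kmalloc cache backing it was created earlier in the ascending sweep. A hedged sketch checking that invariant (PAGE_SIZE and both helpers are illustrative stand-ins, not the kernel's exact definitions):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* demo value; the real one is per-arch */

/* Management data moves off-slab only for large objects... */
static int off_slab(size_t obj_size)
{
	return obj_size > PAGE_SIZE / 8;
}

/* ...while the freelist for a slab of such objects is tiny: about one
 * index per object that fits in a page. */
static size_t freelist_size(size_t obj_size)
{
	return (PAGE_SIZE / obj_size) * sizeof(unsigned int);
}

int main(void)
{
	size_t sz;

	for (sz = PAGE_SIZE / 8 + 1; sz <= PAGE_SIZE; sz++)
		if (off_slab(sz))
			/* the descriptor is smaller than the object itself,
			 * so its kmalloc cache was created earlier */
			assert(freelist_size(sz) < sz);
	return 0;
}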
@@ -2538,14 +2538,17 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 
 /*
  * Get the memory for a slab management obj.
- * For a slab cache when the slab descriptor is off-slab, slab descriptors
- * always come from malloc_sizes caches.  The slab descriptor cannot
- * come from the same cache which is getting created because,
- * when we are searching for an appropriate cache for these
- * descriptors in kmem_cache_create, we search through the malloc_sizes array.
- * If we are creating a malloc_sizes cache here it would not be visible to
- * kmem_find_general_cachep till the initialization is complete.
- * Hence we cannot have freelist_cache same as the original cache.
+ *
+ * For a slab cache when the slab descriptor is off-slab, the
+ * slab descriptor can't come from the same cache which is being created,
+ * because if it were, that would mean we defer the creation of
+ * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point.
+ * And we eventually call down to __kmem_cache_create(), which
+ * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
+ * This is a "chicken-and-egg" problem.
+ *
+ * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
+ * which are all initialized during kmem_cache_init().
  */
 static void *alloc_slabmgmt(struct kmem_cache *cachep,
 		struct page *page, int colour_off,
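To make the chicken-and-egg point concrete: an off-slab descriptor needs some already-initialized kmalloc cache to live in, and kmem_cache_init() guarantees those exist before any ordinary cache creation runs. A toy user-space model of that ordering constraint (all names hypothetical):

#include <stdio.h>

static int kmalloc_caches_ready;	/* set once bootstrap completes */

/* Stands in for kmem_cache_init(): the fixed-size kmalloc caches are
 * brought up first, before any ordinary cache creation runs. */
static void kmem_cache_init_demo(void)
{
	kmalloc_caches_ready = 1;
}

/* Stands in for __kmem_cache_create(): an off-slab descriptor must find
 * a home in an already-existing kmalloc cache, never in the cache that
 * is still being created. */
static int create_cache_demo(const char *name, int off_slab)
{
	if (off_slab && !kmalloc_caches_ready) {
		fprintf(stderr, "%s: no cache for the descriptor yet\n", name);
		return -1;
	}
	printf("%s: created\n", name);
	return 0;
}

int main(void)
{
	create_cache_demo("too-early", 1);	/* the chicken-and-egg case */
	kmem_cache_init_demo();
	create_cache_demo("fine-now", 1);	/* bootstrap done; succeeds */
	return 0;
}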
@@ -3353,7 +3356,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 }
 
 /*
- * Caller needs to acquire correct kmem_list's list_lock
+ * Caller needs to acquire correct kmem_cache_node's list_lock
 */
 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 			int node)
@@ -3607,11 +3610,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	struct kmem_cache *cachep;
 	void *ret;
 
-	/* If you want to save a few bytes .text space: replace
-	 * __ with kmem_.
-	 * Then kmalloc uses the uninlined functions instead of the inline
-	 * functions.
-	 */
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
@@ -3703,7 +3701,7 @@ EXPORT_SYMBOL(kfree);
 /*
  * This initializes kmem_cache_node or resizes various caches for all nodes.
  */
-static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
+static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
 	struct kmem_cache_node *n;
@@ -3759,8 +3757,8 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 		}
 
 		kmem_cache_node_init(n);
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE +
+				((unsigned long)cachep) % REAPTIMEOUT_NODE;
 		n->shared = new_shared;
 		n->alien = new_alien;
 		n->free_limit = (1 + nr_cpus_node(node)) *
@@ -3846,7 +3844,7 @@ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		kfree(ccold);
 	}
 	kfree(new);
-	return alloc_kmemlist(cachep, gfp);
+	return alloc_kmem_cache_node(cachep, gfp);
 }
 
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
@@ -4015,7 +4013,7 @@ static void cache_reap(struct work_struct *w)
 		if (time_after(n->next_reap, jiffies))
 			goto next;
 
-		n->next_reap = jiffies + REAPTIMEOUT_LIST3;
+		n->next_reap = jiffies + REAPTIMEOUT_NODE;
 
 		drain_array(searchp, n, n->shared, 0, node);
 
@@ -4036,7 +4034,7 @@ next:
 		next_reap_node();
 out:
 	/* Set up the next iteration */
-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
+	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
 }
 
 #ifdef CONFIG_SLABINFO
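For readers unfamiliar with the pattern in cache_reap(): a delayed work item does one pass of housekeeping and then re-arms itself, and round_jiffies_relative() rounds the expiry so such timers batch together and wake the CPU less often. A minimal self-rescheduling sketch using the same kernel APIs (demo names; the module init/exit boilerplate that would arm and cancel the work is omitted):

#include <linux/workqueue.h>
#include <linux/timer.h>

#define DEMO_PERIOD	(2 * HZ)	/* mirrors REAPTIMEOUT_AC */

static void demo_reap(struct work_struct *w);
static DECLARE_DELAYED_WORK(demo_work, demo_reap);

/* One pass of housekeeping, then re-arm: the work item schedules itself,
 * so the period is measured from the end of each pass, like cache_reap(). */
static void demo_reap(struct work_struct *w)
{
	/* ... drain per-CPU arrays, free unused slabs, etc. ... */
	schedule_delayed_work(&demo_work,
			      round_jiffies_relative(DEMO_PERIOD));
}

/* Arm once from init code:
 *	schedule_delayed_work(&demo_work, round_jiffies_relative(DEMO_PERIOD));
 * and cancel on teardown with cancel_delayed_work_sync(&demo_work). */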
