diff options
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 65 |
1 file changed, 21 insertions, 44 deletions
@@ -194,10 +194,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) | |||
194 | #define __OBJECT_POISON 0x80000000UL /* Poison object */ | 194 | #define __OBJECT_POISON 0x80000000UL /* Poison object */ |
195 | #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ | 195 | #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ |
196 | 196 | ||
197 | #ifdef CONFIG_SMP | ||
198 | static struct notifier_block slab_notifier; | ||
199 | #endif | ||
200 | |||
201 | /* | 197 | /* |
202 | * Tracking user of a slab. | 198 | * Tracking user of a slab. |
203 | */ | 199 | */ |
@@ -2305,6 +2301,25 @@ static void flush_all(struct kmem_cache *s) | |||
2305 | } | 2301 | } |
2306 | 2302 | ||
2307 | /* | 2303 | /* |
2304 | * Use the cpu notifier to ensure that the cpu slabs are flushed when | ||
2305 | * necessary. | ||
2306 | */ | ||
2307 | static int slub_cpu_dead(unsigned int cpu) | ||
2308 | { | ||
2309 | struct kmem_cache *s; | ||
2310 | unsigned long flags; | ||
2311 | |||
2312 | mutex_lock(&slab_mutex); | ||
2313 | list_for_each_entry(s, &slab_caches, list) { | ||
2314 | local_irq_save(flags); | ||
2315 | __flush_cpu_slab(s, cpu); | ||
2316 | local_irq_restore(flags); | ||
2317 | } | ||
2318 | mutex_unlock(&slab_mutex); | ||
2319 | return 0; | ||
2320 | } | ||
2321 | |||
2322 | /* | ||
2308 | * Check if the objects in a per cpu structure fit numa | 2323 | * Check if the objects in a per cpu structure fit numa |
2309 | * locality expectations. | 2324 | * locality expectations. |
2310 | */ | 2325 | */ |
@@ -4144,9 +4159,8 @@ void __init kmem_cache_init(void) | |||
4144 | /* Setup random freelists for each cache */ | 4159 | /* Setup random freelists for each cache */ |
4145 | init_freelist_randomization(); | 4160 | init_freelist_randomization(); |
4146 | 4161 | ||
4147 | #ifdef CONFIG_SMP | 4162 | cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, |
4148 | register_cpu_notifier(&slab_notifier); | 4163 | slub_cpu_dead); |
4149 | #endif | ||
4150 | 4164 | ||
4151 | pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n", | 4165 | pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n", |
4152 | cache_line_size(), | 4166 | cache_line_size(), |
@@ -4210,43 +4224,6 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) | |||
4210 | return err; | 4224 | return err; |
4211 | } | 4225 | } |
4212 | 4226 | ||
4213 | #ifdef CONFIG_SMP | ||
4214 | /* | ||
4215 | * Use the cpu notifier to ensure that the cpu slabs are flushed when | ||
4216 | * necessary. | ||
4217 | */ | ||
4218 | static int slab_cpuup_callback(struct notifier_block *nfb, | ||
4219 | unsigned long action, void *hcpu) | ||
4220 | { | ||
4221 | long cpu = (long)hcpu; | ||
4222 | struct kmem_cache *s; | ||
4223 | unsigned long flags; | ||
4224 | |||
4225 | switch (action) { | ||
4226 | case CPU_UP_CANCELED: | ||
4227 | case CPU_UP_CANCELED_FROZEN: | ||
4228 | case CPU_DEAD: | ||
4229 | case CPU_DEAD_FROZEN: | ||
4230 | mutex_lock(&slab_mutex); | ||
4231 | list_for_each_entry(s, &slab_caches, list) { | ||
4232 | local_irq_save(flags); | ||
4233 | __flush_cpu_slab(s, cpu); | ||
4234 | local_irq_restore(flags); | ||
4235 | } | ||
4236 | mutex_unlock(&slab_mutex); | ||
4237 | break; | ||
4238 | default: | ||
4239 | break; | ||
4240 | } | ||
4241 | return NOTIFY_OK; | ||
4242 | } | ||
4243 | |||
4244 | static struct notifier_block slab_notifier = { | ||
4245 | .notifier_call = slab_cpuup_callback | ||
4246 | }; | ||
4247 | |||
4248 | #endif | ||
4249 | |||
4250 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) | 4227 | void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) |
4251 | { | 4228 | { |
4252 | struct kmem_cache *s; | 4229 | struct kmem_cache *s; |