-rw-r--r--   include/linux/notifier.h |  4
-rw-r--r--   kernel/cpu.c             |  4
-rw-r--r--   kernel/sched.c           | 25
-rw-r--r--   kernel/workqueue.c       | 35
-rw-r--r--   mm/slab.c                | 18
5 files changed, 36 insertions(+), 50 deletions(-)
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0c40cc0b4a36..5dfbc684ce7d 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -207,9 +207,7 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
 #define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
-#define CPU_LOCK_ACQUIRE	0x0008 /* Acquire all hotcpu locks */
-#define CPU_LOCK_RELEASE	0x0009 /* Release all hotcpu locks */
-#define CPU_DYING		0x000A /* CPU (unsigned)v not running any task,
+#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
 					* not handling interrupts, soon dead */
 
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
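The CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE events disappear because subsystems are no longer asked to take their hotplug locks from the notifier chain; code that needs a stable cpu_online_map now brackets the access itself with get_online_cpus()/put_online_cpus(). A minimal caller-side sketch (the helper name and the per-CPU work are made up for illustration):

#include <linux/cpu.h>		/* get_online_cpus(), put_online_cpus() */
#include <linux/cpumask.h>

/*
 * Hypothetical helper: visit every online CPU with hotplug excluded.
 * get_online_cpus() only blocks a concurrent cpu_up()/cpu_down(); it does
 * not serialize readers against each other, so per-subsystem data still
 * needs its own lock (see the workqueue and slab hunks further down).
 */
static void example_visit_online_cpus(void (*fn)(int cpu))
{
	int cpu;

	get_online_cpus();	/* cpu_online_map cannot change below this point */
	for_each_online_cpu(cpu)
		fn(cpu);
	put_online_cpus();
}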
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b0c4152995f8..e0d3a4f56ecb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -218,7 +218,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
@@ -271,7 +270,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out_release:
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	cpu_hotplug_done();
 	return err;
 }
@@ -302,7 +300,6 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
 	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
 					-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
@@ -326,7 +323,6 @@ out_notify:
 	if (ret != 0)
 		__raw_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
-	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 	cpu_hotplug_done();
 
 	return ret;
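With the CPU_LOCK_* broadcasts gone from _cpu_up()/_cpu_down(), a hotplug notifier only sees the lifecycle events and takes whatever lock it needs inside the individual cases. A hedged sketch of such a callback; the function, its per-CPU state, and the pr_debug message are hypothetical, and registration would go through hotcpu_notifier() as kernel/sched.c does below:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical notifier: locking, if any, is taken per event. */
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	pr_debug("example: hotplug event %lu for CPU %u\n", action, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* allocate per-CPU state for 'cpu' here */
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* tear the per-CPU state down again */
		break;
	}
	return NOTIFY_OK;
}
/* registered e.g. with: hotcpu_notifier(example_cpu_callback, 0); */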
diff --git a/kernel/sched.c b/kernel/sched.c
index 672aa68bfeac..c0e2db683e29 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -439,7 +439,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		mutex_unlock(&sched_hotcpu_mutex);
+		put_online_cpus();
 		return -ESRCH;
 	}
 
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	}
 out_unlock:
 	put_task_struct(p);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	return retval;
 }
 
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 	struct task_struct *p;
 	int retval;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	struct rq *rq;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&sched_hotcpu_mutex);
-		break;
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 #endif
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&sched_hotcpu_mutex);
-		break;
 	}
 	return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
 	int err;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	detach_destroy_domains(&cpu_online_map);
 	err = arch_init_sched_domains(&cpu_online_map);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 
 	return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
 	cpumask_t non_isolated_cpus;
 
-	mutex_lock(&sched_hotcpu_mutex);
+	get_online_cpus();
 	arch_init_sched_domains(&cpu_online_map);
 	cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
 	if (cpus_empty(non_isolated_cpus))
 		cpu_set(smp_processor_id(), non_isolated_cpus);
-	mutex_unlock(&sched_hotcpu_mutex);
+	put_online_cpus();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
 
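The ordering in the sched_setaffinity()/sched_getaffinity() hunks is deliberate: get_online_cpus() may sleep, so it is taken before read_lock(&tasklist_lock) and released only after the rwlock is dropped. A hypothetical helper showing the same bracket around a decision that depends on cpu_online_map (only the get/put calls and the cpumask helpers come from the patch; the rest is illustrative):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Hypothetical: clamp a requested affinity mask to the CPUs that are
 * actually online, with hotplug held off so the answer cannot go stale
 * before set_cpus_allowed() runs.
 */
static int example_clamp_affinity(struct task_struct *p, cpumask_t requested)
{
	cpumask_t effective;
	int ret = -EINVAL;

	get_online_cpus();	/* may sleep, so take it before any rwlock/spinlock */
	cpus_and(effective, requested, cpu_online_map);
	if (!cpus_empty(effective))
		ret = set_cpus_allowed(p, effective);
	put_online_cpus();

	return ret;
}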
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8db0b597509e..52db48e7f6e7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -67,9 +67,8 @@ struct workqueue_struct {
 #endif
 };
 
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
-   threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
@@ -592,8 +591,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  * Returns zero on success.
  * Returns -ve errno on failure.
  *
- * Appears to be racy against CPU hotplug.
- *
  * schedule_on_each_cpu() is very slow.
  */
 int schedule_on_each_cpu(work_func_t func)
@@ -605,7 +602,7 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
-	preempt_disable();		/* CPU hotplug */
+	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
@@ -613,8 +610,8 @@ int schedule_on_each_cpu(work_func_t func)
 		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
 	}
-	preempt_enable();
 	flush_workqueue(keventd_wq);
+	put_online_cpus();
 	free_percpu(works);
 	return 0;
 }
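Switching schedule_on_each_cpu() from preempt_disable() to get_online_cpus() lets it sleep while queueing and keeps the online map stable across the flush as well, which is why the "racy against CPU hotplug" caveat could be dropped from the comment above. A hypothetical caller, purely to show the interface:

#include <linux/workqueue.h>

/* Hypothetical work function: runs once on every CPU that was online
 * when schedule_on_each_cpu() was called. */
static void example_per_cpu_work(struct work_struct *unused)
{
	/* touch this CPU's private data here */
}

static int example_run_everywhere(void)
{
	/* may sleep; returns 0 on success or a negative errno */
	return schedule_on_each_cpu(example_per_cpu_work);
}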
@@ -750,8 +747,10 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		mutex_lock(&workqueue_mutex);
+		get_online_cpus();
+		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
+		spin_unlock(&workqueue_lock);
 
 		for_each_possible_cpu(cpu) {
 			cwq = init_cpu_workqueue(wq, cpu);
@@ -760,7 +759,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		mutex_unlock(&workqueue_mutex);
+		put_online_cpus();
 	}
 
 	if (err) {
@@ -775,7 +774,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	/*
 	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * workqueue_mutex protects cwq->thread
+	 * get_online_cpus() protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -810,9 +809,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	struct cpu_workqueue_struct *cwq;
 	int cpu;
 
-	mutex_lock(&workqueue_mutex);
+	get_online_cpus();
+	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
-	mutex_unlock(&workqueue_mutex);
+	spin_unlock(&workqueue_lock);
+	put_online_cpus();
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
@@ -835,13 +836,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&workqueue_mutex);
-		return NOTIFY_OK;
-
-	case CPU_LOCK_RELEASE:
-		mutex_unlock(&workqueue_mutex);
-		return NOTIFY_OK;
 
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
@@ -854,7 +848,8 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	case CPU_UP_PREPARE:
 		if (!create_workqueue_thread(cwq, cpu))
 			break;
-		printk(KERN_ERR "workqueue for %i failed\n", cpu);
+		printk(KERN_ERR "workqueue [%s] for %i failed\n",
+			wq->name, cpu);
 		return NOTIFY_BAD;
 
 	case CPU_ONLINE:
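Taken together, the workqueue changes split the old workqueue_mutex into two pieces: get_online_cpus() pins the CPU set for the whole sleepable create/destroy path, while the new workqueue_lock spinlock covers only the short list update. A minimal sketch of that nesting; the function is hypothetical and assumes it lives in kernel/workqueue.c next to the static workqueue_lock and workqueues symbols:

#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/*
 * Hypothetical mirror of the pattern above: hotplug lock outermost,
 * spinlock only around the list update, nothing sleeping between the
 * spin_lock/spin_unlock pair.
 */
static void example_register_wq(struct workqueue_struct *wq)
{
	get_online_cpus();		/* sleepable; holds off cpu_up()/cpu_down() */
	spin_lock(&workqueue_lock);	/* protects only the 'workqueues' list */
	list_add(&wq->list, &workqueues);
	spin_unlock(&workqueue_lock);
	/* ... per-CPU thread setup that needs a stable online map ... */
	put_online_cpus();
}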
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -730,8 +730,7 @@ static inline void init_lock_keys(void)
 #endif
 
 /*
- * 1. Guard access to the cache-chain.
- * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ * Guard access to the cache-chain.
  */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1331,12 +1330,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	int err = 0;
 
 	switch (action) {
-	case CPU_LOCK_ACQUIRE:
-		mutex_lock(&cache_chain_mutex);
-		break;
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
+		mutex_lock(&cache_chain_mutex);
 		err = cpuup_prepare(cpu);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1373,9 +1371,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
+		mutex_lock(&cache_chain_mutex);
 		cpuup_canceled(cpu);
-		break;
-	case CPU_LOCK_RELEASE:
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
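Because the hotplug core already holds its own lock (cpu_hotplug_begin() in the kernel/cpu.c hunks above) when cpuup_callback() takes cache_chain_mutex, every other path that takes cache_chain_mutex has to acquire the hotplug lock first to keep a single lock order, which is what the kmem_cache_* hunks below do. A sketch of that ordering; the walker is hypothetical and assumes it sits in mm/slab.c next to the static cache_chain:

#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/*
 * Hypothetical: iterate the cache chain with a consistent view of
 * cpu_online_map.  Lock order is cpu hotplug lock first, then
 * cache_chain_mutex, matching both cpuup_callback() above and
 * kmem_cache_create() below.
 */
static void example_for_each_cache(void (*fn)(struct kmem_cache *))
{
	struct kmem_cache *cachep;

	get_online_cpus();
	mutex_lock(&cache_chain_mutex);
	list_for_each_entry(cachep, &cache_chain, next)
		fn(cachep);
	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();
}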
@@ -2170,6 +2167,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 * We use cache_chain_mutex to ensure a consistent view of
 	 * cpu_online_map as well. Please see cpuup_callback
 	 */
+	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
 
 	list_for_each_entry(pc, &cache_chain, next) {
@@ -2396,6 +2394,7 @@ oops:
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 			name);
 	mutex_unlock(&cache_chain_mutex);
+	put_online_cpus();
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2547,9 +2546,11 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	int ret;
 	BUG_ON(!cachep || in_interrupt());
 
+	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
 	ret = __cache_shrink(cachep);
 	mutex_unlock(&cache_chain_mutex);
+	put_online_cpus();
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
@@ -2575,6 +2576,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 	BUG_ON(!cachep || in_interrupt());
 
 	/* Find the cache in the chain of caches. */
+	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
@@ -2584,6 +2586,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 		slab_error(cachep, "Can't free all objects");
 		list_add(&cachep->next, &cache_chain);
 		mutex_unlock(&cache_chain_mutex);
+		put_online_cpus();
 		return;
 	}
 
@@ -2592,6 +2595,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 
 	__kmem_cache_destroy(cachep);
 	mutex_unlock(&cache_chain_mutex);
+	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 