Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c       | 35
-rw-r--r--  kernel/futex.c        | 18
-rw-r--r--  kernel/hrtimer.c      |  2
-rw-r--r--  kernel/panic.c        |  1
-rw-r--r--  kernel/sched.c        |  4
-rw-r--r--  kernel/stop_machine.c |  1
-rw-r--r--  kernel/timer.c        | 41
-rw-r--r--  kernel/workqueue.c    | 33
8 files changed, 76 insertions(+), 59 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1a649f2bb9bb..4ea6f0dc2fc5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -816,6 +816,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
         struct cpuset trialcs;
         int retval, cpus_unchanged;
 
+        /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
+        if (cs == &top_cpuset)
+                return -EACCES;
+
         trialcs = *cs;
         retval = cpulist_parse(buf, trialcs.cpus_allowed);
         if (retval < 0)
@@ -2033,6 +2037,33 @@ out:
         return err;
 }
 
+/*
+ * The top_cpuset tracks what CPUs and Memory Nodes are online,
+ * period. This is necessary in order to make cpusets transparent
+ * (of no affect) on systems that are actively using CPU hotplug
+ * but making no active use of cpusets.
+ *
+ * This handles CPU hotplug (cpuhp) events. If someday Memory
+ * Nodes can be hotplugged (dynamically changing node_online_map)
+ * then we should handle that too, perhaps in a similar way.
+ */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpuset_handle_cpuhp(struct notifier_block *nb,
+                                unsigned long phase, void *cpu)
+{
+        mutex_lock(&manage_mutex);
+        mutex_lock(&callback_mutex);
+
+        top_cpuset.cpus_allowed = cpu_online_map;
+
+        mutex_unlock(&callback_mutex);
+        mutex_unlock(&manage_mutex);
+
+        return 0;
+}
+#endif
+
 /**
  * cpuset_init_smp - initialize cpus_allowed
  *
@@ -2043,6 +2074,8 @@ void __init cpuset_init_smp(void)
 {
         top_cpuset.cpus_allowed = cpu_online_map;
         top_cpuset.mems_allowed = node_online_map;
+
+        hotcpu_notifier(cpuset_handle_cpuhp, 0);
 }
 
 /**
@@ -2387,7 +2420,7 @@ EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
 int cpuset_excl_nodes_overlap(const struct task_struct *p)
 {
         const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
-        int overlap = 0;                /* do cpusets overlap? */
+        int overlap = 1;                /* do cpusets overlap? */
 
         task_lock(current);
         if (current->flags & PF_EXITING) {
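Note on the cpuset change above: cpuset_handle_cpuhp() only takes effect because it is registered with hotcpu_notifier(), the CPU-hotplug notifier helper. For reference, that registration pattern looks roughly like the sketch below; example_cpu_callback() and example_init() are made-up names for illustration, not part of the patch.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Invoked for every CPU-hotplug transition; hcpu carries the CPU number. */
static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_DEAD:
                /* Re-sample the online map after the transition, much as
                 * the patch refreshes top_cpuset.cpus_allowed. */
                printk(KERN_INFO "cpu %ld changed state (action %lu)\n",
                       (long)hcpu, action);
                break;
        }
        return NOTIFY_OK;
}

static int __init example_init(void)
{
        /* Same registration call the patch adds to cpuset_init_smp(). */
        hotcpu_notifier(example_cpu_callback, 0);
        return 0;
}
__initcall(example_init);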
diff --git a/kernel/futex.c b/kernel/futex.c
index c2b2e0b83abf..b9b8aea5389e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -297,7 +297,7 @@ static int futex_handle_fault(unsigned long address, int attempt)
         struct vm_area_struct * vma;
         struct mm_struct *mm = current->mm;
 
-        if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
+        if (attempt > 2 || !(vma = find_vma(mm, address)) ||
             vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
                 return -EFAULT;
 
@@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
                 p = NULL;
                 goto out_unlock;
         }
-        if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
+        if (p->exit_state != 0) {
                 p = NULL;
                 goto out_unlock;
         }
@@ -747,8 +747,10 @@ retry:
                  */
                 if (attempt++) {
                         if (futex_handle_fault((unsigned long)uaddr2,
-                                               attempt))
+                                               attempt)) {
+                                ret = -EFAULT;
                                 goto out;
+                        }
                         goto retry;
                 }
 
@@ -1322,9 +1324,10 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
          * still holding the mmap_sem.
          */
         if (attempt++) {
-                if (futex_handle_fault((unsigned long)uaddr, attempt))
+                if (futex_handle_fault((unsigned long)uaddr, attempt)) {
+                        ret = -EFAULT;
                         goto out_unlock_release_sem;
-
+                }
                 goto retry_locked;
         }
 
@@ -1506,9 +1509,10 @@ pi_faulted:
          * still holding the mmap_sem.
          */
         if (attempt++) {
-                if (futex_handle_fault((unsigned long)uaddr, attempt))
+                if (futex_handle_fault((unsigned long)uaddr, attempt)) {
+                        ret = -EFAULT;
                         goto out_unlock;
-
+                }
                 goto retry_locked;
         }
 
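All four futex hunks tighten the same fault-retry idiom: when the in-kernel access to the user-space word faults, try to fix up the mapping and retry a bounded number of times, and make sure a hard failure is reported as -EFAULT instead of whatever value happened to be left in ret. A stripped-down sketch of that idiom follows; handle_op() and fixup_mapping() are hypothetical stand-ins for the futex internals, not real kernel functions.

/* Hypothetical helpers: handle_op() returns 0 or -EFAULT, and
 * fixup_mapping() is assumed to refuse once the attempt count gets
 * too high, the way futex_handle_fault() now gives up for attempt > 2. */
static int handle_op(u32 __user *uaddr);
static int fixup_mapping(unsigned long address, int attempt);

static int do_op_with_retry(u32 __user *uaddr)
{
        int attempt = 0;
        int ret;

retry:
        ret = handle_op(uaddr);
        if (ret != -EFAULT)
                return ret;

        if (fixup_mapping((unsigned long)uaddr, ++attempt))
                return -EFAULT;         /* report the failure explicitly */
        goto retry;
}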
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index be989efc7856..21c38a7e666b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -187,7 +187,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
 {
         struct hrtimer_base *new_base;
 
-        new_base = &__get_cpu_var(hrtimer_bases[base->index]);
+        new_base = &__get_cpu_var(hrtimer_bases)[base->index];
 
         if (base != new_base) {
                 /*
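The hrtimer fix is purely about where the array index sits relative to the per-CPU accessor: __get_cpu_var() wants the bare per-CPU symbol, and any indexing belongs on its result. A tiny illustration with a hypothetical per-CPU array (example_bases and pick_base() are invented for the sketch):

#include <linux/percpu.h>

struct example_base { int value; };                     /* hypothetical */
static DEFINE_PER_CPU(struct example_base, example_bases[2]);

static struct example_base *pick_base(int index)
{
        /* Resolve this CPU's copy of the array first, then index into it:
         * &__get_cpu_var(example_bases)[index], not
         * &__get_cpu_var(example_bases[index]). */
        return &__get_cpu_var(example_bases)[index];
}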
diff --git a/kernel/panic.c b/kernel/panic.c
index d8a0bca21233..9b8dcfd1ca93 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -18,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/nmi.h>
 #include <linux/kexec.h>
+#include <linux/debug_locks.h>
 
 int panic_on_oops;
 int tainted;
diff --git a/kernel/sched.c b/kernel/sched.c
index a2be2d055299..a234fbee1238 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4162,10 +4162,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
                 read_unlock_irq(&tasklist_lock);
                 return -ESRCH;
         }
-        get_task_struct(p);
-        read_unlock_irq(&tasklist_lock);
         retval = sched_setscheduler(p, policy, &lparam);
-        put_task_struct(p);
+        read_unlock_irq(&tasklist_lock);
 
         return retval;
 }
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index dcfb5d731466..51cacd111dbd 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -111,7 +111,6 @@ static int stop_machine(void)
         /* If some failed, kill them all. */
         if (ret < 0) {
                 stopmachine_set_state(STOPMACHINE_EXIT);
-                up(&stopmachine_mutex);
                 return ret;
         }
 
diff --git a/kernel/timer.c b/kernel/timer.c
index b650f04888ed..1d7dd6267c2d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1324,46 +1324,19 @@ asmlinkage long sys_getpid(void)
 }
 
 /*
- * Accessing ->group_leader->real_parent is not SMP-safe, it could
- * change from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer (it was and remains a dereferencable kernel pointer
- * no matter what): we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * NOTE2: ->group_leader never changes from under us.
+ * Accessing ->real_parent is not SMP-safe, it could
+ * change from under us. However, we can use a stale
+ * value of ->real_parent under rcu_read_lock(), see
+ * release_task()->call_rcu(delayed_put_task_struct).
  */
 asmlinkage long sys_getppid(void)
 {
         int pid;
-        struct task_struct *me = current;
-        struct task_struct *parent;
 
-        parent = me->group_leader->real_parent;
-        for (;;) {
-                pid = parent->tgid;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-{
-                struct task_struct *old = parent;
+        rcu_read_lock();
+        pid = rcu_dereference(current->real_parent)->tgid;
+        rcu_read_unlock();
 
-                /*
-                 * Make sure we read the pid before re-reading the
-                 * parent pointer:
-                 */
-                smp_rmb();
-                parent = me->group_leader->real_parent;
-                if (old != parent)
-                        continue;
-}
-#endif
-                break;
-        }
         return pid;
 }
 
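The sys_getppid() rewrite leans on RCU: release_task() frees a task_struct only via call_rcu(delayed_put_task_struct), so a possibly stale ->real_parent pointer read under rcu_read_lock() still refers to valid memory for the duration of the read-side critical section. The same read-side pattern, wrapped in a hypothetical helper for illustration:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical helper mirroring the new sys_getppid() body. */
static pid_t parent_tgid(void)
{
        pid_t pid;

        rcu_read_lock();        /* pin the RCU read-side critical section */
        pid = rcu_dereference(current->real_parent)->tgid;
        rcu_read_unlock();      /* after this, the parent may be freed */

        return pid;
}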
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 448e8f7b342d..835fe28b87a8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,7 +68,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static DEFINE_SPINLOCK(workqueue_lock);
+static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu;
@@ -320,10 +320,10 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
         } else {
                 int cpu;
 
-                lock_cpu_hotplug();
+                mutex_lock(&workqueue_mutex);
                 for_each_online_cpu(cpu)
                         flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-                unlock_cpu_hotplug();
+                mutex_unlock(&workqueue_mutex);
         }
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -371,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
         }
 
         wq->name = name;
-        /* We don't need the distraction of CPUs appearing and vanishing. */
-        lock_cpu_hotplug();
+        mutex_lock(&workqueue_mutex);
         if (singlethread) {
                 INIT_LIST_HEAD(&wq->list);
                 p = create_workqueue_thread(wq, singlethread_cpu);
@@ -381,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
                 else
                         wake_up_process(p);
         } else {
-                spin_lock(&workqueue_lock);
                 list_add(&wq->list, &workqueues);
-                spin_unlock(&workqueue_lock);
                 for_each_online_cpu(cpu) {
                         p = create_workqueue_thread(wq, cpu);
                         if (p) {
@@ -393,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
                                 destroy = 1;
                 }
         }
-        unlock_cpu_hotplug();
+        mutex_unlock(&workqueue_mutex);
 
         /*
          * Was there any error during startup? If yes then clean up:
@@ -434,17 +431,15 @@ void destroy_workqueue(struct workqueue_struct *wq)
         flush_workqueue(wq);
 
         /* We don't need the distraction of CPUs appearing and vanishing. */
-        lock_cpu_hotplug();
+        mutex_lock(&workqueue_mutex);
         if (is_single_threaded(wq))
                 cleanup_workqueue_thread(wq, singlethread_cpu);
         else {
                 for_each_online_cpu(cpu)
                         cleanup_workqueue_thread(wq, cpu);
-                spin_lock(&workqueue_lock);
                 list_del(&wq->list);
-                spin_unlock(&workqueue_lock);
         }
-        unlock_cpu_hotplug();
+        mutex_unlock(&workqueue_mutex);
         free_percpu(wq->cpu_wq);
         kfree(wq);
 }
@@ -515,11 +510,13 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
         if (!works)
                 return -ENOMEM;
 
+        mutex_lock(&workqueue_mutex);
         for_each_online_cpu(cpu) {
                 INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                              per_cpu_ptr(works, cpu));
         }
+        mutex_unlock(&workqueue_mutex);
         flush_workqueue(keventd_wq);
         free_percpu(works);
         return 0;
@@ -635,6 +632,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
         switch (action) {
         case CPU_UP_PREPARE:
+                mutex_lock(&workqueue_mutex);
                 /* Create a new workqueue thread for it. */
                 list_for_each_entry(wq, &workqueues, list) {
                         if (!create_workqueue_thread(wq, hotcpu)) {
@@ -653,6 +651,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                         kthread_bind(cwq->thread, hotcpu);
                         wake_up_process(cwq->thread);
                 }
+                mutex_unlock(&workqueue_mutex);
                 break;
 
         case CPU_UP_CANCELED:
@@ -664,6 +663,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                 any_online_cpu(cpu_online_map));
                         cleanup_workqueue_thread(wq, hotcpu);
                 }
+                mutex_unlock(&workqueue_mutex);
+                break;
+
+        case CPU_DOWN_PREPARE:
+                mutex_lock(&workqueue_mutex);
+                break;
+
+        case CPU_DOWN_FAILED:
+                mutex_unlock(&workqueue_mutex);
                 break;
 
         case CPU_DEAD:
@@ -671,6 +679,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                         cleanup_workqueue_thread(wq, hotcpu);
                 list_for_each_entry(wq, &workqueues, list)
                         take_over_work(wq, hotcpu);
+                mutex_unlock(&workqueue_mutex);
                 break;
         }
 
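The workqueue conversion has two sides: workqueue_lock (the spinlock guarding the workqueues list) and lock_cpu_hotplug() are both replaced by a single workqueue_mutex, and that mutex is additionally held across whole hotplug transitions by taking it at the *_PREPARE notification and dropping it when the transition either completes or fails. A condensed sketch of that notifier shape is below; example_mutex and example_cpu_callback() are illustrative names, not code from the patch.

#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(example_mutex);

static int example_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_DOWN_PREPARE:
                /* Hold the subsystem mutex for the whole transition, so the
                 * set of per-cpu threads cannot change underneath it. */
                mutex_lock(&example_mutex);
                break;

        case CPU_ONLINE:
        case CPU_UP_CANCELED:
        case CPU_DOWN_FAILED:
        case CPU_DEAD:
                /* The transition finished, successfully or not: drop it. */
                mutex_unlock(&example_mutex);
                break;
        }
        return NOTIFY_OK;
}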