author     Arnaldo Carvalho de Melo <acme@redhat.com>   2012-02-06 16:10:52 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2012-02-06 16:11:02 -0500
commit     5ddf146f70a44ed4133dea4d377b172605a1cfa5 (patch)
tree       058b6f58b5ab756f9fe798983e7c7bbf75450509 /kernel
parent     781ba9d2ed9df07dbb413fb5ee80ef7d353841c9 (diff)
parent     a4a03fc7ef89020baca4f19174e6a43767c6d78a (diff)
Merge branch 'perf/urgent' into perf/core
So that we can get the 'perf bench' exec stack fixes, and then apply the remaining fix for the files added after what is in perf/urgent.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c           |  16
-rw-r--r--  kernel/fork.c           |  20
-rw-r--r--  kernel/power/process.c  |  19
-rw-r--r--  kernel/power/user.c     |   9
-rw-r--r--  kernel/sched/core.c     |  19
-rw-r--r--  kernel/sched/fair.c     |  34
-rw-r--r--  kernel/sched/rt.c       |   5
-rw-r--r--  kernel/watchdog.c       |   2
8 files changed, 106 insertions(+), 18 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 294b1709170d..4b4042f9bc6a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1038,6 +1038,22 @@ void do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
+
+	/*
+	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+	 * when the following two conditions are both true:
+	 *   - there is a race on mmap_sem (it is acquired by
+	 *     exit_mm()), and
+	 *   - an SMI occurs before TASK_RUNNING is set
+	 *     (or the hypervisor switches to another guest),
+	 * so we may become TASK_RUNNING after becoming TASK_DEAD.
+	 *
+	 * To avoid this, wait until tsk->pi_lock, which is held by
+	 * try_to_wake_up(), has been released.
+	 */
+	smp_mb();
+	raw_spin_unlock_wait(&tsk->pi_lock);
+
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
 	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
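For context, the interleaving the new comment describes looks roughly like the timeline below. It is an illustration only, not part of the commit; the function names match the kernel code involved, but the sequence is simplified.

/*
 * Illustrative timeline (simplified): how a delayed TASK_RUNNING store
 * from a waker can overwrite the TASK_DEAD set by do_exit().
 *
 *   CPU 0: try_to_wake_up(tsk)            CPU 1: do_exit() on tsk
 *   --------------------------            -----------------------
 *   raw_spin_lock(&tsk->pi_lock)
 *   ... delayed here (SMI, or the
 *       hypervisor runs another
 *       guest for a while) ...            exit_mm(), exit_rcu(), ...
 *                                         tsk->state = TASK_DEAD;
 *   tsk->state = TASK_RUNNING;            schedule();  expected never to
 *   raw_spin_unlock(&tsk->pi_lock)        return, but the task is now
 *                                         runnable again
 *
 * The added smp_mb() + raw_spin_unlock_wait(&tsk->pi_lock) make the
 * exiting task wait until any such in-flight wakeup has dropped
 * pi_lock before TASK_DEAD is stored.
 */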
diff --git a/kernel/fork.c b/kernel/fork.c
index 051f090d40c1..1b2ef3c23ae4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+	struct mm_struct *mm;
+	int err;
+
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = get_task_mm(task);
+	if (mm && mm != current->mm &&
+			!ptrace_may_access(task, mode)) {
+		mmput(mm);
+		mm = ERR_PTR(-EACCES);
+	}
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
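The new mm_access() helper packages the "take a reference to another task's mm, subject to the ptrace access check" pattern behind cred_guard_mutex. A hypothetical caller might use it as sketched below; the lookup helpers around it (find_get_pid(), get_pid_task()) are ordinary kernel API and not part of this diff, and the function name grab_mm_of() is made up for the illustration.

#include <linux/err.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

/* Hypothetical caller (illustration only): grab a reference to another
 * process's mm, respecting the same ptrace-based permission check. */
static struct mm_struct *grab_mm_of(pid_t nr)
{
	struct pid *pid;
	struct task_struct *task;
	struct mm_struct *mm;

	pid = find_get_pid(nr);
	if (!pid)
		return ERR_PTR(-ESRCH);

	task = get_pid_task(pid, PIDTYPE_PID);
	put_pid(pid);
	if (!task)
		return ERR_PTR(-ESRCH);

	/* May sleep; returns NULL (no mm), a valid mm, or an ERR_PTR. */
	mm = mm_access(task, PTRACE_MODE_READ);
	put_task_struct(task);

	return mm;	/* caller does mmput() when done */
}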
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 77274c9ba2f1..eeca00311f39 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -188,3 +188,22 @@ void thaw_processes(void)
 	printk("done.\n");
 }
 
+void thaw_kernel_threads(void)
+{
+	struct task_struct *g, *p;
+
+	pm_nosig_freezing = false;
+	printk("Restarting kernel threads ... ");
+
+	thaw_workqueues();
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+			__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
+	schedule();
+	printk("done.\n");
+}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6b1ab7a88522..e5a21a857302 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -274,6 +274,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		swsusp_free();
 		memset(&data->handle, 0, sizeof(struct snapshot_handle));
 		data->ready = 0;
+		/*
+		 * It is necessary to thaw kernel threads here, because
+		 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
+		 * SNAPSHOT_FREE.  In that case, if kernel threads were not
+		 * thawed, the preallocation of memory carried out by
+		 * hibernation_snapshot() might run into problems (i.e. it
+		 * might fail or even deadlock).
+		 */
+		thaw_kernel_threads();
 		break;
 
 	case SNAPSHOT_PREF_IMAGE_SIZE:
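The comment describes a sequence a hibernation tool can drive from user space through /dev/snapshot. A stripped-down, hypothetical version of that sequence is sketched below (no error handling, and the step that would actually write the image out is omitted); it only shows why SNAPSHOT_CREATE_IMAGE can legitimately follow SNAPSHOT_FREE.

/* Hypothetical user-space sequence against /dev/snapshot (illustration
 * only): retrying SNAPSHOT_CREATE_IMAGE right after SNAPSHOT_FREE is the
 * case the new thaw_kernel_threads() call covers. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	int in_suspend = 0;
	int fd = open("/dev/snapshot", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/snapshot");
		return 1;
	}

	ioctl(fd, SNAPSHOT_FREEZE);		/* freeze user space and kernel threads */
	ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);
	/* ... the image would normally be read from fd and written out here ... */
	ioctl(fd, SNAPSHOT_FREE);		/* discard the snapshot */
	/* Retrying immediately relies on SNAPSHOT_FREE having re-thawed
	 * kernel threads, which is what this hunk adds. */
	ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);

	ioctl(fd, SNAPSHOT_UNFREEZE);
	close(fd);
	return 0;
}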
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df00cb09263e..5255c9d2e053 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
@@ -723,9 +724,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +732,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4129,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4142,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4993,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7027,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 
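The repeated substitution of enqueue_task()/dequeue_task() for activate_task()/deactivate_task() is easier to read next to the wrappers themselves, reproduced below essentially as they stand in this tree (they are not part of the diff). The only difference is the rq->nr_uninterruptible adjustment, which feeds the load average; the apparent intent of these hunks is that paths which merely requeue or migrate an already-queued task should not touch that counter.

/* For reference only (existing code in kernel/sched/core.c, not added by
 * this commit): the wrappers additionally maintain nr_uninterruptible. */
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}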
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 84adb2d66cbd..7c6414fc669d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4866,6 +4866,15 @@ static void nohz_balancer_kick(int cpu)
 	return;
 }
 
+static inline void clear_nohz_tick_stopped(int cpu)
+{
+	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+		atomic_dec(&nohz.nr_cpus);
+		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	}
+}
+
 static inline void set_cpu_sd_state_busy(void)
 {
 	struct sched_domain *sd;
@@ -4904,6 +4913,12 @@ void select_nohz_load_balancer(int stop_tick)
 {
 	int cpu = smp_processor_id();
 
+	/*
+	 * If this cpu is going down, then nothing needs to be done.
+	 */
+	if (!cpu_active(cpu))
+		return;
+
 	if (stop_tick) {
 		if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 			return;
@@ -4914,6 +4929,18 @@ void select_nohz_load_balancer(int stop_tick)
 	}
 	return;
 }
+
+static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DYING:
+		clear_nohz_tick_stopped(smp_processor_id());
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5097,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	 * busy tick after returning from idle, we will update the busy stats.
 	 */
 	set_cpu_sd_state_busy();
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-		atomic_dec(&nohz.nr_cpus);
-	}
+	clear_nohz_tick_stopped(cpu);
 
 	/*
 	 * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5613,7 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
 	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+	cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
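sched_ilb_notifier() follows the CPU-hotplug notifier convention of this era; notably, CPU_DYING notifications run on the CPU that is going down, which is why it can use smp_processor_id() there. A generic sketch of the pattern, with hypothetical names, is below; masking out CPU_TASKS_FROZEN makes one callback handle both the normal and the suspend/resume variants of each event.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical hotplug callback (illustration only, not from this diff). */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;	/* the CPU the event is about */

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu %d is now online\n", cpu);
		return NOTIFY_OK;
	case CPU_DYING:
		/* Runs on the dying CPU itself, with interrupts off. */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static int __init example_init(void)
{
	/* Same registration helper the diff uses; priority 0. */
	cpu_notifier(example_cpu_callback, 0);
	return 0;
}
core_initcall(example_init);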
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3640ebbb466b..f42ae7fb5ec5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1587,6 +1587,11 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	if (unlikely(task_running(rq, next_task)))
+		return 0;
+#endif
+
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1d7bca7f4f52..d117262deba3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	if (__this_cpu_read(soft_watchdog_warn) == true)
 		return HRTIMER_RESTART;
 
-	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+	printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
 		smp_processor_id(), duration,
 		current->comm, task_pid_nr(current));
 	print_modules();