author    Arnd Bergmann <arnd@arndb.de>  2011-10-20 09:14:25 -0400
committer Arnd Bergmann <arnd@arndb.de>  2011-10-20 09:14:25 -0400
commit    b4cbb8a4e602ea77b0525d06eff89c6a6070dab3 (patch)
tree      a5dd723679582505ef3905c90f0c2c032d191b94 /kernel/sched.c
parent    526b264163068f77c5f2409031f5e25caf3900a9 (diff)
parent    c5d7a9230e5e277f262b6806b7f4d6b35de5a3fb (diff)
Merge branch 'imx-features-for-arnd' of git://git.pengutronix.de/git/imx/linux-2.6 into imx/devel
Conflicts:
        arch/arm/mach-mx5/clock-mx51-mx53.c
        arch/arm/mach-mx5/devices-imx53.h
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   67
1 file changed, 27 insertions(+), 40 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ccacdbdecf45..b50b0f0c9aa9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-       perf_event_task_sched_in(current);
+       perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
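
The hunk above widens the sched-in callback so it receives the outgoing task alongside the incoming one. As a rough sketch of why a switch-time hook wants both sides in a single call (illustrative names only, not the kernel's perf API):

struct task_stats {
        unsigned long long sched_in_count;
        unsigned long long sched_out_count;
};

/* With both tasks visible, the hook can close the outgoing task's
 * accounting window and open the incoming one's in one step, instead
 * of caching "last task seen" state on the side. */
static void on_task_switch(struct task_stats *prev, struct task_stats *next)
{
        prev->sched_out_count++;        /* outgoing side now available here */
        next->sched_in_count++;         /* ... together with the incoming side */
}
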
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 }
 
 /*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-       struct task_cputime totals;
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns;
-
-       rq = task_rq_lock(p, &flags);
-       thread_group_cputime(p, &totals);
-       ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
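
The helper deleted above summed a thread group's runtime while holding only one task's runqueue lock, which does not stop the group's other threads from accruing time on other CPUs mid-sum; its own comment concedes the result can omit pending runtime. A userspace analogue of the same pitfall (hypothetical names, not kernel code):

#include <stdatomic.h>

#define NTHREADS 4

static _Atomic unsigned long long thread_runtime_ns[NTHREADS];

/* Each load below is atomic, but the loop as a whole is not: threads
 * keep incrementing their counters between loads, so the sum is only
 * a lower bound by the time it returns, never a consistent snapshot. */
static unsigned long long group_runtime_ns(void)
{
        unsigned long long sum = 0;

        for (int i = 0; i < NTHREADS; i++)
                sum += atomic_load(&thread_runtime_ns[i]);
        return sum;
}
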
@@ -4279,9 +4255,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -4322,16 +4298,6 @@ need_resched:
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
-
-                       /*
-                        * If we are going to sleep and we have plugged IO
-                        * queued, make sure to submit it to avoid deadlocks.
-                        */
-                       if (blk_needs_flush_plug(prev)) {
-                               raw_spin_unlock(&rq->lock);
-                               blk_schedule_flush_plug(prev);
-                               raw_spin_lock(&rq->lock);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -4369,6 +4335,26 @@ need_resched:
        if (need_resched())
                goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+       if (!tsk->state)
+               return;
+       /*
+        * If we are going to sleep and we have plugged IO queued,
+        * make sure to submit it to avoid deadlocks.
+        */
+       if (blk_needs_flush_plug(tsk))
+               blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void __sched schedule(void)
+{
+       struct task_struct *tsk = current;
+
+       sched_submit_work(tsk);
+       __schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
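
The two hunks above, together with the three call-site hunks below, split the old schedule() into a core __schedule() and a thin voluntary entry point that first flushes plugged block I/O with no runqueue lock held; preemption paths go straight to the core, since a preempted task is still runnable and has nothing to flush. A compressed sketch of that entry-point split (simplified names, not the kernel's implementation):

/* 0 means runnable, matching the kernel's TASK_RUNNING convention. */
static void core_schedule(void)
{
        /* pick the next task and context-switch (elided) */
}

static void submit_pending_work(int state)
{
        if (state == 0)
                return;         /* not going to sleep: nothing to flush */
        /* flush queued I/O here, before any scheduler lock is taken */
}

void sketch_schedule(int state)         /* voluntary entry, like schedule() */
{
        submit_pending_work(state);
        core_schedule();
}

void sketch_preempt_schedule(void)      /* preemption entry: skip the flush */
{
        core_schedule();
}
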
@@ -4435,7 +4421,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
        do {
                add_preempt_count_notrace(PREEMPT_ACTIVE);
-               schedule();
+               __schedule();
                sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
@@ -4463,7 +4449,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                local_irq_enable();
-               schedule();
+               __schedule();
                local_irq_disable();
                sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5574,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
        add_preempt_count(PREEMPT_ACTIVE);
-       schedule();
+       __schedule();
        sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7429,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
                struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
                if (sd && (sd->flags & SD_OVERLAP))
                        free_sched_groups(sd->groups, 0);
+               kfree(*per_cpu_ptr(sdd->sd, j));
                kfree(*per_cpu_ptr(sdd->sg, j));
                kfree(*per_cpu_ptr(sdd->sgp, j));
        }
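
The added kfree() pairs the teardown with an allocation made when the per-CPU domain data was built; without it, each loop iteration appears to have leaked the sched_domain object itself while freeing its siblings. A generic illustration of the pattern (hypothetical names, not the scheduler's structures):

#include <stdlib.h>

struct domain_entry { void *sd, *sg, *sgp; };

/* A teardown loop that frees only two of three per-entry allocations
 * leaks the third on every iteration; the first free() below is the
 * counterpart of the kfree() line the hunk adds. */
static void free_entries(struct domain_entry *tab, int n)
{
        for (int i = 0; i < n; i++) {
                free(tab[i].sd);
                free(tab[i].sg);
                free(tab[i].sgp);
        }
}
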