Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c        62
-rw-r--r--  kernel/sched_fair.c    2
2 files changed, 28 insertions(+), 36 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4fb3532dd7e8..38933cafea8a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -216,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
 {
-        p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
-        p->se.parent = task_group(p)->se[task_cpu(p)];
+        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+        p->se.parent = task_group(p)->se[cpu];
 }
 
 #else
 
-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
 
 #endif  /* CONFIG_FAIR_GROUP_SCHED */
 
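For orientation: under CONFIG_FAIR_GROUP_SCHED each task_group carries one
cfs_rq and one group sched_entity per CPU, which is what the two arrays
indexed above are. A minimal sketch of the fields involved (abridged; the
other members of this era's struct task_group are omitted):

        struct task_group {
                struct sched_entity **se;       /* one group entity per CPU   */
                struct cfs_rq **cfs_rq;         /* one group runqueue per CPU */
        };

Taking cpu as an explicit parameter, rather than rereading task_cpu(p), lets
the caller repoint these before the task's new ->cpu value is published (see
the __set_task_cpu() hunk below).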
@@ -455,18 +455,18 @@ static void update_rq_clock(struct rq *rq)
  */
 enum {
         SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
-        SCHED_FEAT_START_DEBIT          = 2,
-        SCHED_FEAT_TREE_AVG             = 4,
-        SCHED_FEAT_APPROX_AVG           = 8,
-        SCHED_FEAT_WAKEUP_PREEMPT       = 16,
+        SCHED_FEAT_WAKEUP_PREEMPT       = 2,
+        SCHED_FEAT_START_DEBIT          = 4,
+        SCHED_FEAT_TREE_AVG             = 8,
+        SCHED_FEAT_APPROX_AVG           = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
         SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
+        SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
         SCHED_FEAT_START_DEBIT          * 1 |
         SCHED_FEAT_TREE_AVG             * 0 |
-        SCHED_FEAT_APPROX_AVG           * 0 |
-        SCHED_FEAT_WAKEUP_PREEMPT       * 1;
+        SCHED_FEAT_APPROX_AVG           * 0;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
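Each feature remains a distinct power of two, so the sched_feat() test below
still isolates a single bit, and the default feature set (NEW_FAIR_SLEEPERS,
WAKEUP_PREEMPT and START_DEBIT on, the two AVG estimators off) is unchanged;
only the numeric default moves from 1+2+16 = 19 to 1+2+4 = 7. A feature check
then reads, schematically (condensed sketch of a call site, not a verbatim
quote):

        /* e.g. in the wakeup-preemption path of sched_fair.c */
        if (!sched_feat(WAKEUP_PREEMPT))
                return;         /* bit clear: never preempt on wakeup */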
@@ -1022,10 +1022,16 @@ unsigned long weighted_cpuload(const int cpu)
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
+        set_task_cfs_rq(p, cpu);
 #ifdef CONFIG_SMP
+        /*
+         * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+         * successfully executed on another CPU. We must ensure that updates
+         * of per-task data have been completed by this moment.
+         */
+        smp_wmb();
         task_thread_info(p)->cpu = cpu;
 #endif
-        set_task_cfs_rq(p);
 }
 
 #ifdef CONFIG_SMP
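The barrier is the point of this hunk: the stores done by set_task_cfs_rq()
must become visible before the store to ->cpu, because another CPU that finds
the new ->cpu via task_rq_lock() may immediately dereference the per-task
pointers. Schematically (an illustration of the ordering, not kernel code;
the reader stands in for a task_rq_lock() user):

        /* writer: moving task p */
        p->se.cfs_rq = new_cfs_rq;      /* per-task updates ...           */
        p->se.parent = new_parent;      /* ... must complete ...          */
        smp_wmb();                      /* ... before the new ->cpu lands */
        task_thread_info(p)->cpu = cpu;

        /* reader, on another CPU */
        rq = cpu_rq(task_cpu(p));       /* observes the new ->cpu         */
        spin_lock(&rq->lock);
        use(p->se.cfs_rq);              /* sees the updated pointer,      */
                                        /* not a stale one                */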
@@ -3390,10 +3396,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
         struct rq *rq = this_rq();
         cputime64_t tmp;
 
-        if (p->flags & PF_VCPU) {
-                account_guest_time(p, cputime);
-                return;
-        }
+        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+                return account_guest_time(p, cputime);
 
         p->stime = cputime_add(p->stime, cputime);
 
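The added condition narrows guest-time accounting to ticks that did not
interrupt another interrupt. In this era irq_count() is preempt_count()
masked to its hardirq and softirq bits, and callers in interrupt context pass
HARDIRQ_OFFSET as hardirq_offset, so the arithmetic works out roughly as
follows (worked example under those assumptions):

        /*
         * Tick lands while a vcpu task runs guest code:
         *   irq_count()    == HARDIRQ_OFFSET  (only the tick's own hardirq)
         *   hardirq_offset == HARDIRQ_OFFSET  (caller knows it is in irq)
         *   difference     == 0            -> account as guest time
         *
         * Tick lands while that task services some other hardirq/softirq:
         *   irq_count()     > HARDIRQ_OFFSET
         *   difference     != 0            -> fall through, system time
         */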
@@ -5278,23 +5282,9 @@ static void migrate_live_tasks(int src_cpu)
 }
 
 /*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
-        update_rq_clock(rq);
-
-        if (p->state == TASK_UNINTERRUPTIBLE)
-                rq->nr_uninterruptible--;
-
-        enqueue_task(rq, p, 0);
-        inc_nr_running(p, rq);
-}
-
-/*
  * Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
  */
 void sched_idle_next(void)
 {
@@ -5314,8 +5304,8 @@ void sched_idle_next(void)
 
         __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
-        /* Add idle task to the _front_ of its priority queue: */
-        activate_idle_task(p, rq);
+        update_rq_clock(rq);
+        activate_task(rq, p, 0);
 
         spin_unlock_irqrestore(&rq->lock, flags);
 }
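The deleted activate_idle_task() was an open-coded copy of activate_task()
plus a clock update; once the caller does update_rq_clock() itself, the
generic helper suffices. For reference, activate_task() in this version
looks roughly like this (paraphrased from the same file; some bookkeeping
may be elided):

        static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
        {
                if (p->state == TASK_UNINTERRUPTIBLE)
                        rq->nr_uninterruptible--;

                enqueue_task(rq, p, wakeup);
                inc_nr_running(p, rq);
        }

The "_front_ of the runqueue" wording is dropped from the comment as well:
queue position no longer matters, since the SCHED_FIFO boost to MAX_RT_PRIO-1
makes the idle task the next pick anyway (barring other tasks at that
priority).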
@@ -7089,8 +7079,10 @@ void sched_move_task(struct task_struct *tsk)
 
         rq = task_rq_lock(tsk, &flags);
 
-        if (tsk->sched_class != &fair_sched_class)
+        if (tsk->sched_class != &fair_sched_class) {
+                set_task_cfs_rq(tsk, task_cpu(tsk));
                 goto done;
+        }
 
         update_rq_clock(rq);
 
@@ -7103,7 +7095,7 @@ void sched_move_task(struct task_struct *tsk)
                 tsk->sched_class->put_prev_task(rq, tsk);
         }
 
-        set_task_cfs_rq(tsk);
+        set_task_cfs_rq(tsk, task_cpu(tsk));
 
         if (on_rq) {
                 if (unlikely(running))
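Taken together with the hunk above, sched_move_task() now refreshes the group
pointers on both paths; only the fair-class path needs the dequeue/requeue
dance. The assembled shape, abridged to the lines touched here (the rationale
comment is a plausible reading, not taken from the patch):

        rq = task_rq_lock(tsk, &flags);

        if (tsk->sched_class != &fair_sched_class) {
                /* not on a cfs_rq right now, but keep the pointers
                 * current so a later switch to the fair class is safe */
                set_task_cfs_rq(tsk, task_cpu(tsk));
                goto done;
        }

        update_rq_clock(rq);
        /* ... dequeue / put_prev_task, as shown above ... */
        set_task_cfs_rq(tsk, task_cpu(tsk));
        /* ... re-enqueue / set_curr_task ... */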
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d3c03070872d..ee00da284b12 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -43,7 +43,7 @@ unsigned int sysctl_sched_min_granularity = 1000000ULL;
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 20;
 
 /*
  * After fork, child runs first. (default) If set to 0 then
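Making sched_nr_latency static only narrows linkage; the change implies
nothing outside this file references it any longer. Its value tracks the two
tunables named in the comment, which the defaults visible in this hunk's
context bear out (sched_nr_latency = 20, sysctl_sched_min_granularity =
1000000 ns, implying the era's 20 ms default sysctl_sched_latency):

        /* sched_nr_latency == sysctl_sched_latency / sysctl_sched_min_granularity
         *                  == 20000000 ns / 1000000 ns == 20                     */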