Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  79
1 file changed, 26 insertions, 53 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..fb9764fbc537 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1745,8 +1745,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
 	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
 	p->numa_work.next = &p->numa_work;
-	p->numa_faults = NULL;
-	p->numa_faults_buffer = NULL;
+	p->numa_faults_memory = NULL;
+	p->numa_faults_buffer_memory = NULL;
+	p->last_task_numa_placement = 0;
+	p->last_sum_exec_runtime = 0;
 
 	INIT_LIST_HEAD(&p->numa_entry);
 	p->numa_group = NULL;
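For context: besides renaming the per-task NUMA fault arrays, this hunk seeds two new fields. A plausible consumer (not shown here, since the diffstat is limited to core.c) is the NUMA placement pass in kernel/sched/fair.c, which could use them to weigh fault statistics by the runtime accumulated since the previous pass. A minimal sketch under that assumption; the helper name is illustrative, only the two fields initialized above are real:

static u64 numa_runtime_since_last_placement(struct task_struct *p, u64 now)
{
	u64 runtime = p->se.sum_exec_runtime;
	u64 delta = runtime - p->last_sum_exec_runtime;

	/* Record where this pass ended so the next one sees only new time. */
	p->last_sum_exec_runtime = runtime;
	p->last_task_numa_placement = now;

	return delta;
}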
@@ -2167,13 +2169,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 
 #ifdef CONFIG_SMP
 
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
-	if (prev->sched_class->pre_schedule)
-		prev->sched_class->pre_schedule(rq, prev);
-}
-
 /* rq->lock is NOT held, but preemption is disabled */
 static inline void post_schedule(struct rq *rq)
 {
@@ -2191,10 +2186,6 @@ static inline void post_schedule(struct rq *rq)
 
 #else
 
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
 static inline void post_schedule(struct rq *rq)
 {
 }
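With both definitions of pre_schedule() gone, the per-class hook has no caller left in __schedule(); whatever a class did there is expected to move into its pick_next_task() method. A hedged sketch for the RT class, reusing the existing pull_rt_task() balancer; the _pick_next_task_rt() helper is hypothetical:

static struct task_struct *
pick_next_task_rt(struct rq *rq, struct task_struct *prev)
{
	/* Formerly pre_schedule_rt(): pull runnable RT tasks first. */
	if (prev && rt_task(prev))
		pull_rt_task(rq);

	return _pick_next_task_rt(rq);	/* hypothetical pick helper */
}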
@@ -2577,18 +2568,11 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
-{
-	if (prev->on_rq || rq->skip_clock_update < 0)
-		update_rq_clock(rq);
-	prev->sched_class->put_prev_task(rq, prev);
-}
-
 /*
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq)
+pick_next_task(struct rq *rq, struct task_struct *prev)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
@@ -2597,14 +2581,15 @@ pick_next_task(struct rq *rq)
 	 * Optimization: we know that if all tasks are in
 	 * the fair class we can call that function directly:
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq);
+	if (likely(prev->sched_class == &fair_sched_class &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+		p = fair_sched_class.pick_next_task(rq, prev);
 		if (likely(p))
 			return p;
 	}
 
 	for_each_class(class) {
-		p = class->pick_next_task(rq);
+		p = class->pick_next_task(rq, prev);
 		if (p)
 			return p;
 	}
@@ -2700,13 +2685,10 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
-	pre_schedule(rq, prev);
-
-	if (unlikely(!rq->nr_running))
-		idle_balance(cpu, rq);
+	if (prev->on_rq || rq->skip_clock_update < 0)
+		update_rq_clock(rq);
 
-	put_prev_task(rq, prev);
-	next = pick_next_task(rq);
+	next = pick_next_task(rq, prev);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->skip_clock_update = 0;
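The net effect of the hunks above: __schedule() no longer calls put_prev_task() or idle_balance() itself; the rq clock update is done inline, and @prev is handed to pick_next_task(), making each class responsible for putting the previous task before returning the next one. (The fast path also gained a prev->sched_class == &fair_sched_class check, since the direct call into fair_sched_class.pick_next_task() is only safe when prev is a task it knows how to put.) A minimal sketch of the expected shape for the fair class, simplified, using the pick_next_entity()/set_next_entity() walk that exists in fair.c:

static struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	/* New responsibility: put prev before handing out the next task. */
	if (prev)
		prev->sched_class->put_prev_task(rq, prev);

	/* Walk down the group hierarchy picking an entity at each level. */
	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}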
@@ -2998,7 +2980,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	unsigned long flags;
 	struct rq *rq;
 
-	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
+	if (task_nice(p) == nice || nice < -20 || nice > 19)
 		return;
 	/*
 	 * We have to be careful, if called from sys_setpriority(),
@@ -3076,7 +3058,7 @@ SYSCALL_DEFINE1(nice, int, increment)
 	if (increment > 40)
 		increment = 40;
 
-	nice = TASK_NICE(current) + increment;
+	nice = task_nice(current) + increment;
 	if (nice < -20)
 		nice = -20;
 	if (nice > 19)
@@ -3109,18 +3091,6 @@ int task_prio(const struct task_struct *p)
 }
 
 /**
- * task_nice - return the nice value of a given task.
- * @p: the task in question.
- *
- * Return: The nice value [ -20 ... 0 ... 19 ].
- */
-int task_nice(const struct task_struct *p)
-{
-	return TASK_NICE(p);
-}
-EXPORT_SYMBOL(task_nice);
-
-/**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
  *
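Dropping the exported task_nice() from core.c only makes sense if a replacement exists for the many call sites converted in this diff. The likely counterpart, outside this diffstat, is a static inline in include/linux/sched.h that also retires the TASK_NICE() macro; sketched under that assumption:

/* Presumed replacement in include/linux/sched.h, not shown in this diff. */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}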
@@ -3319,7 +3289,7 @@ recheck:
 	 */
 	if (user && !capable(CAP_SYS_NICE)) {
 		if (fair_policy(policy)) {
-			if (attr->sched_nice < TASK_NICE(p) &&
+			if (attr->sched_nice < task_nice(p) &&
 			    !can_nice(p, attr->sched_nice))
 				return -EPERM;
 		}
@@ -3343,7 +3313,7 @@ recheck:
 	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
 	 */
 	if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
-		if (!can_nice(p, TASK_NICE(p)))
+		if (!can_nice(p, task_nice(p)))
 			return -EPERM;
 	}
 
@@ -3383,7 +3353,7 @@ recheck:
 	 * If not changing anything there's no need to proceed further:
 	 */
 	if (unlikely(policy == p->policy)) {
-		if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
+		if (fair_policy(policy) && attr->sched_nice != task_nice(p))
 			goto change;
 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
 			goto change;
@@ -3835,7 +3805,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	else if (task_has_rt_policy(p))
 		attr.sched_priority = p->rt_priority;
 	else
-		attr.sched_nice = TASK_NICE(p);
+		attr.sched_nice = task_nice(p);
 
 	rcu_read_unlock();
 
@@ -4751,7 +4721,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 		if (rq->nr_running == 1)
 			break;
 
-		next = pick_next_task(rq);
+		next = pick_next_task(rq, NULL);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
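Note that this call site passes NULL for @prev: migrate_tasks() calls put_prev_task() on the picked task itself on the very next line, so there is no previous task for the classes to put. Implementations of pick_next_task() must therefore tolerate prev == NULL, which is what the if (prev) guards in the earlier sketches account for.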
@@ -4841,7 +4811,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-	struct ctl_table *table = sd_alloc_ctl_entry(13);
+	struct ctl_table *table = sd_alloc_ctl_entry(14);
 
 	if (table == NULL)
 		return NULL;
@@ -4869,9 +4839,12 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 			sizeof(int), 0644, proc_dointvec_minmax, false);
 	set_table_entry(&table[10], "flags", &sd->flags,
 			sizeof(int), 0644, proc_dointvec_minmax, false);
-	set_table_entry(&table[11], "name", sd->name,
+	set_table_entry(&table[11], "max_newidle_lb_cost",
+		&sd->max_newidle_lb_cost,
+		sizeof(long), 0644, proc_doulongvec_minmax, false);
+	set_table_entry(&table[12], "name", sd->name,
 		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-	/* &table[12] is terminator */
+	/* &table[13] is terminator */
 
 	return table;
 }
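The allocation bump from 13 to 14 entries preserves the invariant that sysctl tables end in a zeroed sentinel: with "max_newidle_lb_cost" inserted at table[11], "name" shifts to table[12], and table[13] must remain all-zero, because the sysctl core walks the table until it finds an entry with a NULL procname. sd_alloc_ctl_entry() guarantees the zeroing; it is roughly:

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	/* kcalloc() zeroes the array; the last slot doubles as terminator. */
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

This matches the updated comment above: with 14 slots, &table[13] is the terminator.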
@@ -7008,7 +6981,7 @@ void normalize_rt_tasks(void)
 		 * Renice negative nice level userspace
 		 * tasks back to 0:
 		 */
-		if (TASK_NICE(p) < 0 && p->mm)
+		if (task_nice(p) < 0 && p->mm)
 			set_user_nice(p, 0);
 		continue;
 	}