Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index aa14a56f9d03..dc91a4d09ac3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -560,18 +560,8 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
-	/*
-	 * A queue event has occurred, and we're going to schedule. In
-	 * this case, we can save a useless back to back clock update.
-	 */
-	if (test_tsk_need_resched(p))
-		rq->skip_clock_update = 1;
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+	const struct sched_class *class;
+
+	if (p->sched_class == rq->curr->sched_class) {
+		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+	} else {
+		for_each_class(class) {
+			if (class == rq->curr->sched_class)
+				break;
+			if (class == p->sched_class) {
+				resched_task(rq->curr);
+				break;
+			}
+		}
+	}
+
+	/*
+	 * A queue event has occurred, and we're going to schedule. In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(rq->curr))
+		rq->skip_clock_update = 1;
+}
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
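
The rewritten check_preempt_curr() above only defers to the class's own check_preempt_curr() hook when the waking task and the current task are in the same scheduling class. Otherwise it walks the classes with for_each_class(), which visits them from highest to lowest priority: if p's class is reached before rq->curr's class, p belongs to a higher-priority class and the current task is rescheduled outright. A minimal user-space sketch of that walk follows; the class list and all names in it are simplified assumptions, not the kernel's actual definitions.

/*
 * Standalone sketch, not kernel code: the names and the class list below
 * are simplified assumptions used only to illustrate the priority walk.
 */
#include <stdio.h>

struct class_sketch {
	const char *name;
	const struct class_sketch *next;	/* next lower-priority class */
};

static const struct class_sketch idle_class = { "idle", NULL };
static const struct class_sketch fair_class = { "fair", &idle_class };
static const struct class_sketch rt_class   = { "rt",   &fair_class };

/* Walk from the highest-priority class downwards, as for_each_class() does. */
#define for_each_class_sketch(class) \
	for (class = &rt_class; class; class = class->next)

/* 1 if a task of class @wakee preempts a task of class @curr on class alone. */
static int wakee_class_preempts(const struct class_sketch *wakee,
				const struct class_sketch *curr)
{
	const struct class_sketch *class;

	for_each_class_sketch(class) {
		if (class == curr)
			return 0;	/* current task's class is at least as high */
		if (class == wakee)
			return 1;	/* waking task's class is strictly higher */
	}
	return 0;
}

int main(void)
{
	printf("rt wakes over fair: %d\n", wakee_class_preempts(&rt_class, &fair_class));
	printf("fair wakes over rt: %d\n", wakee_class_preempts(&fair_class, &rt_class));
	return 0;
}

Run, this prints 1 then 0: a higher-class wakeup preempts a lower-class current task, but not the other way around, which is the decision the else branch in the hunk encodes.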
@@ -6960,6 +6975,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	if (cpu != group_first_cpu(sd->groups))
 		return;
 
+	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
 	child = sd->child;
 
 	sd->groups->cpu_power = 0;
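
The added line in init_sched_groups_power() caches the number of CPUs in the group: cpumask_weight() is a population count over the group's cpumask, and storing the result in group_weight lets code elsewhere read the cached count rather than recomputing it (the users of group_weight are outside this hunk). A standalone sketch of the same caching idea, with a toy fixed-width mask standing in for a real struct cpumask; the struct and names are illustrative assumptions.

/* Standalone sketch, not kernel code: toy mask instead of struct cpumask. */
#include <stdio.h>

struct group_sketch {
	unsigned long cpumask;		/* one bit per CPU */
	unsigned int group_weight;	/* cached bit count of cpumask */
};

int main(void)
{
	struct group_sketch g = { .cpumask = 0xf0UL, .group_weight = 0 };

	/* Count once, like cpumask_weight(); readers then use group_weight. */
	g.group_weight = (unsigned int)__builtin_popcountl(g.cpumask);
	printf("group spans %u CPUs\n", g.group_weight);	/* prints 4 */
	return 0;
}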
