-rw-r--r--  MAINTAINERS            |  3
-rw-r--r--  include/linux/sched.h  |  2
-rw-r--r--  kernel/sched/core.c    |  8
-rw-r--r--  kernel/sched/fair.c    | 27
4 files changed, 29 insertions, 11 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e9c7b50c612d..4afcfb4c892b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5267,7 +5267,7 @@ S: Maintained
 F:	Documentation/lockdep*.txt
 F:	Documentation/lockstat.txt
 F:	include/linux/lockdep.h
-F:	kernel/lockdep*
+F:	kernel/locking/
 
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
 M:	"Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -7391,7 +7391,6 @@ S: Maintained
 F:	kernel/sched/
 F:	include/linux/sched.h
 F:	include/uapi/linux/sched.h
-F:	kernel/wait.c
 F:	include/linux/wait.h
 
 SCORE ARCHITECTURE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e35d4b9e14a..768b037dfacb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -831,8 +831,6 @@ struct sched_domain {
 	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 	unsigned int nr_balance_failed; /* initialise to 0 */
 
-	u64 last_update;
-
 	/* idle_balance() stats */
 	u64 max_newidle_lb_cost;
 	unsigned long next_decay_max_lb_cost;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f..e85cda20ab2b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
+#endif /* CONFIG_PREEMPT */
 
 /*
  * this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	exception_exit(prev_state);
 }
 
-#endif /* CONFIG_PREEMPT */
-
 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
 			  void *key)
 {
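
Note: taken together, the two hunks above move the #endif up so that only preempt_schedule() remains under CONFIG_PREEMPT, while preempt_schedule_irq() is now compiled unconditionally (assuming no other guard hides in the elided context). A rough sketch of the resulting guard placement in kernel/sched/core.c, with function bodies elided, is:

#ifdef CONFIG_PREEMPT
asmlinkage void __sched notrace preempt_schedule(void)
{
	/* ... reschedule while kernel preemption is enabled ... */
}
EXPORT_SYMBOL(preempt_schedule);
#endif /* CONFIG_PREEMPT */

/*
 * Entry point to schedule() from kernel preemption off the IRQ
 * return path; after this change it is built even when
 * CONFIG_PREEMPT is not set.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
	/* ... schedule with interrupts disabled on entry ... */
}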
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
 		/*
-		 * If we dont want to free the old_rt yet then
+		 * If we dont want to free the old_rd yet then
 		 * set old_rd to NULL to skip the freeing later
 		 * in this function:
 		 */
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
-		rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
+		sd = sd->parent; /* sd_busy */
 	}
+	rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
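
Note: with this hunk applied, sd_busy is updated on every call rather than only when an SD_SHARE_PKG_RESOURCES domain exists, so a CPU whose LLC domain goes away gets sd_busy set to NULL instead of keeping a stale pointer. A sketch of the resulting update_top_cache_domain() is below; the declarations and the highest_flag_domain() call are assumed context from outside the hunk and may differ in detail:

static void update_top_cache_domain(int cpu)
{
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	/* Assumed context: the highest domain sharing package resources (LLC). */
	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sd = sd->parent; /* sd_busy */
	}
	/* Now assigned unconditionally, so it is cleared when no domain exists. */
	rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
}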
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..fd773ade1a31 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
 		 */
 
 		for_each_cpu(cpu, sched_group_cpus(sdg)) {
-			struct sched_group *sg = cpu_rq(cpu)->sd->groups;
+			struct sched_group_power *sgp;
+			struct rq *rq = cpu_rq(cpu);
 
-			power_orig += sg->sgp->power_orig;
-			power += sg->sgp->power;
+			/*
+			 * build_sched_domains() -> init_sched_groups_power()
+			 * gets here before we've attached the domains to the
+			 * runqueues.
+			 *
+			 * Use power_of(), which is set irrespective of domains
+			 * in update_cpu_power().
+			 *
+			 * This avoids power/power_orig from being 0 and
+			 * causing divide-by-zero issues on boot.
+			 *
+			 * Runtime updates will correct power_orig.
+			 */
+			if (unlikely(!rq->sd)) {
+				power_orig += power_of(cpu);
+				power += power_of(cpu);
+				continue;
+			}
+
+			sgp = rq->sd->groups->sgp;
+			power_orig += sgp->power_orig;
+			power += sgp->power;
 		}
 	} else {
 		/*
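
Note: the new early-exit path leans on power_of() because, per the patch comment, update_cpu_power() maintains that per-runqueue value independently of whether the sched_domain tree has been attached, and never leaves it at zero. For reference, a sketch of the helper as it reads in kernel/sched/fair.c of this vintage (an assumption drawn from surrounding kernel sources, not part of this diff):

/* Assumed helper in kernel/sched/fair.c; not part of this patch. */
static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

So during build_sched_domains(), before rq->sd is attached, the group power still accumulates non-zero values and the divide-by-zero on boot described in the comment is avoided; later runtime updates correct power_orig.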