diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-02 13:13:44 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-02 13:13:44 -0500 |
| commit | a0b57ca33ec1cd915ba49051512b3463fa44b4e3 (patch) | |
| tree | a68f879562194bffd0774a3c8cac4d19c0c363b1 | |
| parent | e321ae4c207ce4c6c6812fc65e666efc325cc65e (diff) | |
| parent | 96739d6e548e16d76de39d059e1e39e70c187fff (diff) | |
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
"Various smaller fixlets, all over the place"
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/doc: Fix generation of device-drivers
sched: Expose preempt_schedule_irq()
sched: Fix a trivial typo in comments
sched: Remove unused variable in 'struct sched_domain'
sched: Avoid NULL dereference on sd_busy
sched: Check sched_domain before computing group power
MAINTAINERS: Update file patterns in the lockdep and scheduler entries
| -rw-r--r-- | MAINTAINERS | 3 | ||||
| -rw-r--r-- | include/linux/sched.h | 2 | ||||
| -rw-r--r-- | kernel/sched/core.c | 8 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 27 |
4 files changed, 29 insertions(+), 11 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS index e9c7b50c612d..4afcfb4c892b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -5267,7 +5267,7 @@ S: Maintained | |||
| 5267 | F: Documentation/lockdep*.txt | 5267 | F: Documentation/lockdep*.txt |
| 5268 | F: Documentation/lockstat.txt | 5268 | F: Documentation/lockstat.txt |
| 5269 | F: include/linux/lockdep.h | 5269 | F: include/linux/lockdep.h |
| 5270 | F: kernel/lockdep* | 5270 | F: kernel/locking/ |
| 5271 | 5271 | ||
| 5272 | LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) | 5272 | LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks) |
| 5273 | M: "Richard Russon (FlatCap)" <ldm@flatcap.org> | 5273 | M: "Richard Russon (FlatCap)" <ldm@flatcap.org> |
| @@ -7391,7 +7391,6 @@ S: Maintained | |||
| 7391 | F: kernel/sched/ | 7391 | F: kernel/sched/ |
| 7392 | F: include/linux/sched.h | 7392 | F: include/linux/sched.h |
| 7393 | F: include/uapi/linux/sched.h | 7393 | F: include/uapi/linux/sched.h |
| 7394 | F: kernel/wait.c | ||
| 7395 | F: include/linux/wait.h | 7394 | F: include/linux/wait.h |
| 7396 | 7395 | ||
| 7397 | SCORE ARCHITECTURE | 7396 | SCORE ARCHITECTURE |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 7e35d4b9e14a..768b037dfacb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -831,8 +831,6 @@ struct sched_domain { | |||
| 831 | unsigned int balance_interval; /* initialise to 1. units in ms. */ | 831 | unsigned int balance_interval; /* initialise to 1. units in ms. */ |
| 832 | unsigned int nr_balance_failed; /* initialise to 0 */ | 832 | unsigned int nr_balance_failed; /* initialise to 0 */ |
| 833 | 833 | ||
| 834 | u64 last_update; | ||
| 835 | |||
| 836 | /* idle_balance() stats */ | 834 | /* idle_balance() stats */ |
| 837 | u64 max_newidle_lb_cost; | 835 | u64 max_newidle_lb_cost; |
| 838 | unsigned long next_decay_max_lb_cost; | 836 | unsigned long next_decay_max_lb_cost; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c1808606ee5f..e85cda20ab2b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void) | |||
| 2660 | } while (need_resched()); | 2660 | } while (need_resched()); |
| 2661 | } | 2661 | } |
| 2662 | EXPORT_SYMBOL(preempt_schedule); | 2662 | EXPORT_SYMBOL(preempt_schedule); |
| 2663 | #endif /* CONFIG_PREEMPT */ | ||
| 2663 | 2664 | ||
| 2664 | /* | 2665 | /* |
| 2665 | * this is the entry point to schedule() from kernel preemption | 2666 | * this is the entry point to schedule() from kernel preemption |
| @@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void) | |||
| 2693 | exception_exit(prev_state); | 2694 | exception_exit(prev_state); |
| 2694 | } | 2695 | } |
| 2695 | 2696 | ||
| 2696 | #endif /* CONFIG_PREEMPT */ | ||
| 2697 | |||
| 2698 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, | 2697 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
| 2699 | void *key) | 2698 | void *key) |
| 2700 | { | 2699 | { |
| @@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
| 4762 | cpumask_clear_cpu(rq->cpu, old_rd->span); | 4761 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
| 4763 | 4762 | ||
| 4764 | /* | 4763 | /* |
| 4765 | * If we dont want to free the old_rt yet then | 4764 | * If we dont want to free the old_rd yet then |
| 4766 | * set old_rd to NULL to skip the freeing later | 4765 | * set old_rd to NULL to skip the freeing later |
| 4767 | * in this function: | 4766 | * in this function: |
| 4768 | */ | 4767 | */ |
| @@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu) | |||
| 4910 | if (sd) { | 4909 | if (sd) { |
| 4911 | id = cpumask_first(sched_domain_span(sd)); | 4910 | id = cpumask_first(sched_domain_span(sd)); |
| 4912 | size = cpumask_weight(sched_domain_span(sd)); | 4911 | size = cpumask_weight(sched_domain_span(sd)); |
| 4913 | rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent); | 4912 | sd = sd->parent; /* sd_busy */ |
| 4914 | } | 4913 | } |
| 4914 | rcu_assign_pointer(per_cpu(sd_busy, cpu), sd); | ||
| 4915 | 4915 | ||
| 4916 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | 4916 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
| 4917 | per_cpu(sd_llc_size, cpu) = size; | 4917 | per_cpu(sd_llc_size, cpu) = size; |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e8b652ebe027..fd773ade1a31 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu) | |||
| 5379 | */ | 5379 | */ |
| 5380 | 5380 | ||
| 5381 | for_each_cpu(cpu, sched_group_cpus(sdg)) { | 5381 | for_each_cpu(cpu, sched_group_cpus(sdg)) { |
| 5382 | struct sched_group *sg = cpu_rq(cpu)->sd->groups; | 5382 | struct sched_group_power *sgp; |
| 5383 | struct rq *rq = cpu_rq(cpu); | ||
| 5383 | 5384 | ||
| 5384 | power_orig += sg->sgp->power_orig; | 5385 | /* |
| 5385 | power += sg->sgp->power; | 5386 | * build_sched_domains() -> init_sched_groups_power() |
| 5387 | * gets here before we've attached the domains to the | ||
| 5388 | * runqueues. | ||
| 5389 | * | ||
| 5390 | * Use power_of(), which is set irrespective of domains | ||
| 5391 | * in update_cpu_power(). | ||
| 5392 | * | ||
| 5393 | * This avoids power/power_orig from being 0 and | ||
| 5394 | * causing divide-by-zero issues on boot. | ||
| 5395 | * | ||
| 5396 | * Runtime updates will correct power_orig. | ||
| 5397 | */ | ||
| 5398 | if (unlikely(!rq->sd)) { | ||
| 5399 | power_orig += power_of(cpu); | ||
| 5400 | power += power_of(cpu); | ||
| 5401 | continue; | ||
| 5402 | } | ||
| 5403 | |||
| 5404 | sgp = rq->sd->groups->sgp; | ||
| 5405 | power_orig += sgp->power_orig; | ||
| 5406 | power += sgp->power; | ||
| 5386 | } | 5407 | } |
| 5387 | } else { | 5408 | } else { |
| 5388 | /* | 5409 | /* |
