author    Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 20:10:33 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 20:10:33 -0500
commit    cfa97f993c275d193fe82c22511dfb5f1e51b661 (patch)
tree      552cea5db0ef07bbcc5d53850607dcda6f9b664c /kernel
parent    7238eb4ca35cd63340dc02caf757376e40c1210c (diff)
parent    db2f59c8c9b315f2b88b1dac159b988c6009034d (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: fix section mismatch
  sched: fix double kfree in failure path
  sched: clean up arch_reinit_sched_domains()
  sched: mark sched_create_sysfs_power_savings_entries() as __init
  getrusage: RUSAGE_THREAD should return ru_utime and ru_stime
  sched: fix sched_slice()
  sched_clock: prevent scd->clock from moving backwards, take #2
  sched: sched.c declare variables before they get used
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c            | 18
-rw-r--r--  kernel/sched_clock.c      |  5
-rw-r--r--  kernel/sched_cpupri.c     |  2
-rw-r--r--  kernel/sched_fair.c       | 30
-rw-r--r--  kernel/sys.c              |  2
-rw-r--r--  kernel/time/timekeeping.c |  7
6 files changed, 31 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 545c6fccd1dc..2e3545f57e77 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6957,7 +6957,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-static int init_rootdomain(struct root_domain *rd, bool bootmem)
+static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
 {
 	memset(rd, 0, sizeof(*rd));
 
@@ -6970,7 +6970,7 @@ static int init_rootdomain(struct root_domain *rd, bool bootmem)
 	}
 
 	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
-		goto free_rd;
+		goto out;
 	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
 	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
@@ -6986,8 +6986,7 @@ free_online:
 	free_cpumask_var(rd->online);
 free_span:
 	free_cpumask_var(rd->span);
-free_rd:
-	kfree(rd);
+out:
 	return -ENOMEM;
 }
 
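The error path above no longer frees rd itself: the caller allocates the root_domain and already kfrees it when init_rootdomain() fails, so the old free_rd label amounted to a double kfree. A minimal user-space sketch of that ownership rule, using hypothetical init_foo()/alloc_foo() helpers rather than the kernel functions:

#include <stdlib.h>
#include <errno.h>
#include <string.h>

struct foo { int *a; int *b; };

static int init_foo(struct foo *f)
{
	memset(f, 0, sizeof(*f));

	f->a = malloc(sizeof(*f->a));
	if (!f->a)
		goto out;
	f->b = malloc(sizeof(*f->b));
	if (!f->b)
		goto free_a;
	return 0;

free_a:
	free(f->a);
out:
	return -ENOMEM;		/* report failure, but do not free(f) here */
}

static struct foo *alloc_foo(void)
{
	struct foo *f = malloc(sizeof(*f));

	if (!f)
		return NULL;
	if (init_foo(f)) {
		free(f);	/* single point of deallocation for f */
		return NULL;
	}
	return f;
}

int main(void)
{
	struct foo *f = alloc_foo();

	if (f) {
		free(f->a);
		free(f->b);
		free(f);
	}
	return 0;
}

The point of the sketch is that the error code propagates upward while free() stays at the one place that called malloc(), which is exactly what removing the kfree(rd) from init_rootdomain() restores.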
@@ -7987,7 +7986,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static void arch_reinit_sched_domains(void)
 {
 	get_online_cpus();
 
@@ -7996,13 +7995,10 @@ int arch_reinit_sched_domains(void)
 
 	rebuild_sched_domains();
 	put_online_cpus();
-
-	return 0;
 }
 
 static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 {
-	int ret;
 	unsigned int level = 0;
 
 	if (sscanf(buf, "%u", &level) != 1)
@@ -8023,9 +8019,9 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 	else
 		sched_mc_power_savings = level;
 
-	ret = arch_reinit_sched_domains();
+	arch_reinit_sched_domains();
 
-	return ret ? ret : count;
+	return count;
 }
 
 #ifdef CONFIG_SCHED_MC
@@ -8060,7 +8056,7 @@ static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
 		   sched_smt_power_savings_store);
 #endif
 
-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 {
 	int err = 0;
 
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index e8ab096ddfe3..a0b0852414cc 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -124,7 +124,7 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 
 	clock = scd->tick_gtod + delta;
 	min_clock = wrap_max(scd->tick_gtod, scd->clock);
-	max_clock = scd->tick_gtod + TICK_NSEC;
+	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
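The one-line change above is what keeps scd->clock from moving backwards: the upper clamp now also honours the last value handed out, so a clock that has already run past tick_gtod + TICK_NSEC is no longer pulled back down to that bound. A small self-contained sketch of the clamp, with stand-in typedefs, an assumed TICK_NSEC, and min/max helpers written in the same wrap-safe style as the kernel's wrap_min()/wrap_max():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

#define TICK_NSEC 1000000ULL	/* assumed 1ms tick for the example */

/* min/max that stay correct across u64 wraparound */
static u64 wrap_min(u64 x, u64 y) { return (s64)(x - y) < 0 ? x : y; }
static u64 wrap_max(u64 x, u64 y) { return (s64)(x - y) > 0 ? x : y; }

static u64 clamp_clock(u64 tick_gtod, u64 prev_clock, u64 delta)
{
	u64 clock = tick_gtod + delta;
	u64 min_clock = wrap_max(tick_gtod, prev_clock);
	/*
	 * The fix: the upper bound also honours the previously returned
	 * clock, so a stale tick_gtod cannot clamp the result below it.
	 */
	u64 max_clock = wrap_max(prev_clock, tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
	return clock;
}

int main(void)
{
	/* prev_clock already ran ahead of tick_gtod + TICK_NSEC */
	u64 out = clamp_clock(5000000, 7000000, 100000);

	printf("%llu\n", (unsigned long long)out);	/* prints 7000000 */
	return 0;
}

With the old bound (tick_gtod + TICK_NSEC alone) the same inputs would have produced 6000000, i.e. a value below the previously returned 7000000.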
@@ -227,6 +227,9 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
  */
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
+	if (timekeeping_suspended)
+		return;
+
 	sched_clock_tick();
 	touch_softlockup_watchdog();
 }
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 018b7be1db2e..1e00bfacf9b8 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -151,7 +151,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  *
  * Returns: -ENOMEM if memory fails.
  */
-int cpupri_init(struct cpupri *cp, bool bootmem)
+int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
 {
 	int i;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 56c0efe902a7..e0c0b4bc3f08 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				se->load.weight, &cfs_rq_of(se)->load);
-	}
-
-	return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
+	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-	if (unlikely(!se->on_rq))
-		nr_running++;
+	for_each_sched_entity(se) {
+		struct load_weight *load = &cfs_rq->load;
 
-	return calc_delta_weight(__sched_period(nr_running), se);
+		if (unlikely(!se->on_rq)) {
+			struct load_weight lw = cfs_rq->load;
+
+			update_load_add(&lw, se->load.weight);
+			load = &lw;
+		}
+		slice = calc_delta_mine(slice, se->load.weight, load);
+	}
+	return slice;
 }
 
 /*
diff --git a/kernel/sys.c b/kernel/sys.c
index 4a43617cd565..763c3c17ded3 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1629,6 +1629,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	utime = stime = cputime_zero;
 
 	if (who == RUSAGE_THREAD) {
+		utime = task_utime(current);
+		stime = task_stime(current);
 		accumulate_thread_rusage(p, r);
 		goto out;
 	}
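With the two added lines, getrusage(RUSAGE_THREAD, ...) fills in the calling thread's own ru_utime/ru_stime instead of leaving them zero. A quick user-space check (RUSAGE_THREAD requires _GNU_SOURCE with glibc; the spin loop just accumulates some user time to report):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;
	volatile unsigned long spin = 0;

	/* burn a little user time so there is something to report */
	for (unsigned long i = 0; i < 200000000UL; i++)
		spin += i;

	if (getrusage(RUSAGE_THREAD, &ru) != 0) {
		perror("getrusage");
		return 1;
	}
	printf("thread utime: %ld.%06lds  stime: %ld.%06lds\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return 0;
}

On a kernel without the fix, a thread that has clearly consumed CPU time still prints 0.000000s for both fields.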
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fa05e88aa76f..900f1b6598d1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -46,6 +46,9 @@ struct timespec xtime __attribute__ ((aligned (16)));
 struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
 static unsigned long total_sleep_time;		/* seconds */
 
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
 static struct timespec xtime_cache __attribute__ ((aligned (16)));
 void update_xtime_cache(u64 nsec)
 {
@@ -92,6 +95,8 @@ void getnstimeofday(struct timespec *ts)
 	unsigned long seq;
 	s64 nsecs;
 
+	WARN_ON(timekeeping_suspended);
+
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
@@ -299,8 +304,6 @@ void __init timekeeping_init(void)
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 }
 
-/* flag for if timekeeping is suspended */
-static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
 