From 54da1174922cddd4be83d5a364b2e0fdd693f513 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 23 Jul 2008 20:52:05 +0400 Subject: posix-timers: do_schedule_next_timer: fix the setting of ->si_overrun do_schedule_next_timer() sets info->si_overrun = timr->it_overrun_last, this discards the already accumulated overruns. Signed-off-by: Oleg Nesterov Cc: Mark McLoughlin Cc: Oliver Pinter Cc: Roland McGrath Cc: stable@kernel.org Cc: Andrew Morton Signed-off-by: Thomas Gleixner --- kernel/posix-timers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index dbd8398ddb0b..814a2bb8d2ea 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -289,7 +289,7 @@ void do_schedule_next_timer(struct siginfo *info) else schedule_next_timer(timr); - info->si_overrun = timr->it_overrun_last; + info->si_overrun += timr->it_overrun_last; } if (timr) -- cgit v1.2.2 From ba661292a2bc6ddd305a212b0526e5dc22195fe7 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 23 Jul 2008 20:52:05 +0400 Subject: posix-timers: fix posix_timer_event() vs dequeue_signal() race The bug was reported and analysed by Mark McLoughlin , the patch is based on his and Roland's suggestions. posix_timer_event() always rewrites the pre-allocated siginfo before sending the signal. Most of the written info is the same all the time, but memset(0) is very wrong. If ->sigq is queued we can race with collect_signal() which can fail to find this siginfo looking at .si_signo, or copy_siginfo() can copy the wrong .si_code/si_tid/etc. In short, sys_timer_settime() can in fact stop the active timer, or the user can receive the siginfo with the wrong .si_xxx values. Move "memset(->info, 0)" from posix_timer_event() to alloc_posix_timer(), change send_sigqueue() to set .si_overrun = 0 when ->sigq is not queued. It would be nice to move the whole sigq->info initialization from send to create path, but this is not easy to do without uglifying timer_create() further. As Roland rightly pointed out, we need more cleanups/fixes here, see the "FIXME" comment in the patch. Hopefully this patch makes sense anyway, and it can mask the most bad implications. Reported-by: Mark McLoughlin Signed-off-by: Oleg Nesterov Cc: Mark McLoughlin Cc: Oliver Pinter Cc: Roland McGrath Cc: stable@kernel.org Cc: Andrew Morton Signed-off-by: Thomas Gleixner kernel/posix-timers.c | 17 +++++++++++++---- kernel/signal.c | 1 + 2 files changed, 14 insertions(+), 4 deletions(-) --- kernel/posix-timers.c | 17 +++++++++++++---- kernel/signal.c | 1 + 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 814a2bb8d2ea..0ffaeb075e35 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -296,14 +296,22 @@ void do_schedule_next_timer(struct siginfo *info) unlock_timer(timr, flags); } -int posix_timer_event(struct k_itimer *timr,int si_private) +int posix_timer_event(struct k_itimer *timr, int si_private) { - memset(&timr->sigq->info, 0, sizeof(siginfo_t)); + /* + * FIXME: if ->sigq is queued we can race with + * dequeue_signal()->do_schedule_next_timer(). + * + * If dequeue_signal() sees the "right" value of + * si_sys_private it calls do_schedule_next_timer(). + * We re-queue ->sigq and drop ->it_lock(). + * do_schedule_next_timer() locks the timer + * and re-schedules it while ->sigq is pending. + * Not really bad, but not that we want. 
+ */ timr->sigq->info.si_sys_private = si_private; - /* Send signal to the process that owns this timer.*/ timr->sigq->info.si_signo = timr->it_sigev_signo; - timr->sigq->info.si_errno = 0; timr->sigq->info.si_code = SI_TIMER; timr->sigq->info.si_tid = timr->it_id; timr->sigq->info.si_value = timr->it_sigev_value; @@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void) kmem_cache_free(posix_timers_cache, tmr); tmr = NULL; } + memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); return tmr; } diff --git a/kernel/signal.c b/kernel/signal.c index 72bb4f51f963..13fab9838354 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1280,6 +1280,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) q->info.si_overrun++; goto out; } + q->info.si_overrun = 0; signalfd_notify(t, sig); pending = group ? &t->signal->shared_pending : &t->pending; -- cgit v1.2.2 From 157124c11f4217733691223ecf5ee47558ae9495 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 28 Jul 2008 11:53:11 +0200 Subject: sched: fix warning in hrtick_start_fair() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Benjamin Herrenschmidt reported: > I get that on ppc64 ... > > In file included from kernel/sched.c:1595: > kernel/sched_fair.c: In function ‘hrtick_start_fair’: > kernel/sched_fair.c:902: warning: comparison of distinct pointer types lacks a cast > > Probably harmless but annoying. s64 delta = slice - ran; --> delta = max(10000LL, delta); Probably ppc64's s64 is long vs long long.. I think hpa was looking at sanitizing all these 64bit types across the architectures. Use max_t with an explicit type meanwhile. Reported-by: Benjamin Herrenschmid Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index cf2cd6ce4cb2..0fe94ea43f32 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -899,7 +899,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) * doesn't make sense. Rely on vruntime for fairness. */ if (rq->curr != p) - delta = max(10000LL, delta); + delta = max_t(s64, 10000LL, delta); hrtick_start(rq, delta); } -- cgit v1.2.2 From 94f565598827e2015dce97f4c1ac4871ab84407b Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Sun, 27 Jul 2008 20:27:06 +0900 Subject: sched: fix SCHED_HRTICK dependency Currently, it seems SCHED_HRTICK allowed for !SMP. But, it seems to have no dependency of it. Fix it. 
Signed-off-by: OGAWA Hirofumi Signed-off-by: Ingo Molnar --- kernel/Kconfig.hz | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz index 382dd5a8b2d7..94fabd534b03 100644 --- a/kernel/Kconfig.hz +++ b/kernel/Kconfig.hz @@ -55,4 +55,4 @@ config HZ default 1000 if HZ_1000 config SCHED_HRTICK - def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS + def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS) -- cgit v1.2.2 From e26873bb10f722f10b1af9de05119a3d7cbc07b2 Mon Sep 17 00:00:00 2001 From: roel kluin Date: Tue, 22 Jul 2008 16:51:15 -0400 Subject: sched: test runtime rather than period in global_rt_runtime() Test runtime rather than period Signed-off-by: Roel Kluin Acked-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 0236958addcb..0d1717b00225 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -834,7 +834,7 @@ static inline u64 global_rt_period(void) static inline u64 global_rt_runtime(void) { - if (sysctl_sched_rt_period < 0) + if (sysctl_sched_rt_runtime < 0) return RUNTIME_INF; return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; -- cgit v1.2.2 From 2c3d103ba90827cfb478bf10464d9b5b9cea369c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 25 Jul 2008 19:45:00 +0100 Subject: sched: move sched_clock before first use Move sched_clock() up to stop warning: weak declaration of `sched_clock' after first use results in unspecified behavior (if -fno-unit-at-a-time). Signed-off-by: Hugh Dickins Cc: Mike Travis Cc: Ben Herrenschmidt Cc: Linuxppc-dev@ozlabs.org Signed-off-by: Ingo Molnar --- kernel/sched_clock.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 22ed55d1167f..5a2dc7d8fd98 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -32,6 +32,15 @@ #include #include +/* + * Scheduler clock - returns current time in nanosec units. + * This is default implementation. + * Architectures and sub-architectures can override this. + */ +unsigned long long __attribute__((weak)) sched_clock(void) +{ + return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); +} #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK @@ -321,16 +330,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); #endif -/* - * Scheduler clock - returns current time in nanosec units. - * This is default implementation. - * Architectures and sub-architectures can override this. - */ -unsigned long long __attribute__((weak)) sched_clock(void) -{ - return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); -} - unsigned long long cpu_clock(int cpu) { unsigned long long clock; -- cgit v1.2.2 From e4e4e534faa3c2be4e165ce414f44b76ada7208c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 14 Apr 2008 08:50:02 +0200 Subject: sched clock: revert various sched_clock() changes Found an interactivity problem on a quad core test-system - simple CPU loops would occasionally delay the system un an unacceptable way. After much debugging with Peter Zijlstra it turned out that the problem is caused by the string of sched_clock() changes - they caused the CPU clock to jump backwards a bit - which confuses the scheduler arithmetics. (which is unsigned for performance reasons) So revert: # c300ba2: sched_clock: and multiplier for TSC to gtod drift # c0c8773: sched_clock: only update deltas with local reads. 
# af52a90: sched_clock: stop maximum check on NO HZ # f7cce27: sched_clock: widen the max and min time This solves the interactivity problems. Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Mike Galbraith --- kernel/sched_clock.c | 109 ++++++----------------------------------------- kernel/time/tick-sched.c | 2 - 2 files changed, 13 insertions(+), 98 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 5a2dc7d8fd98..9a7844158ae8 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -44,11 +44,6 @@ unsigned long long __attribute__((weak)) sched_clock(void) #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -#define MULTI_SHIFT 15 -/* Max is double, Min is 1/2 */ -#define MAX_MULTI (2LL << MULTI_SHIFT) -#define MIN_MULTI (1LL << (MULTI_SHIFT-1)) - struct sched_clock_data { /* * Raw spinlock - this is a special case: this might be called @@ -62,10 +57,6 @@ struct sched_clock_data { u64 tick_raw; u64 tick_gtod; u64 clock; - s64 multi; -#ifdef CONFIG_NO_HZ - int check_max; -#endif }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); @@ -97,53 +88,18 @@ void sched_clock_init(void) scd->tick_raw = 0; scd->tick_gtod = ktime_now; scd->clock = ktime_now; - scd->multi = 1 << MULTI_SHIFT; -#ifdef CONFIG_NO_HZ - scd->check_max = 1; -#endif } sched_clock_running = 1; } -#ifdef CONFIG_NO_HZ -/* - * The dynamic ticks makes the delta jiffies inaccurate. This - * prevents us from checking the maximum time update. - * Disable the maximum check during stopped ticks. - */ -void sched_clock_tick_stop(int cpu) -{ - struct sched_clock_data *scd = cpu_sdc(cpu); - - scd->check_max = 0; -} - -void sched_clock_tick_start(int cpu) -{ - struct sched_clock_data *scd = cpu_sdc(cpu); - - scd->check_max = 1; -} - -static int check_max(struct sched_clock_data *scd) -{ - return scd->check_max; -} -#else -static int check_max(struct sched_clock_data *scd) -{ - return 1; -} -#endif /* CONFIG_NO_HZ */ - /* * update the percpu scd from the raw @now value * * - filter out backward motion * - use jiffies to generate a min,max window to clip the raw values */ -static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time) +static void __update_sched_clock(struct sched_clock_data *scd, u64 now) { unsigned long now_jiffies = jiffies; long delta_jiffies = now_jiffies - scd->tick_jiffies; @@ -152,31 +108,16 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim s64 delta = now - scd->prev_raw; WARN_ON_ONCE(!irqs_disabled()); - - /* - * At schedule tick the clock can be just under the gtod. We don't - * want to push it too prematurely. - */ - min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC); - if (min_clock > TICK_NSEC) - min_clock -= TICK_NSEC / 2; + min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC; if (unlikely(delta < 0)) { clock++; goto out; } - /* - * The clock must stay within a jiffie of the gtod. - * But since we may be at the start of a jiffy or the end of one - * we add another jiffy buffer. 
- */ - max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC; - - delta *= scd->multi; - delta >>= MULTI_SHIFT; + max_clock = min_clock + TICK_NSEC; - if (unlikely(clock + delta > max_clock) && check_max(scd)) { + if (unlikely(clock + delta > max_clock)) { if (clock < max_clock) clock = max_clock; else @@ -189,12 +130,9 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim if (unlikely(clock < min_clock)) clock = min_clock; - if (time) - *time = clock; - else { - scd->prev_raw = now; - scd->clock = clock; - } + scd->prev_raw = now; + scd->tick_jiffies = now_jiffies; + scd->clock = clock; } static void lock_double_clock(struct sched_clock_data *data1, @@ -238,26 +176,21 @@ u64 sched_clock_cpu(int cpu) now -= scd->tick_gtod; __raw_spin_unlock(&my_scd->lock); - - __update_sched_clock(scd, now, &clock); - - __raw_spin_unlock(&scd->lock); - } else { __raw_spin_lock(&scd->lock); - __update_sched_clock(scd, now, NULL); - clock = scd->clock; - __raw_spin_unlock(&scd->lock); } + __update_sched_clock(scd, now); + clock = scd->clock; + + __raw_spin_unlock(&scd->lock); + return clock; } void sched_clock_tick(void) { struct sched_clock_data *scd = this_scd(); - unsigned long now_jiffies = jiffies; - s64 mult, delta_gtod, delta_raw; u64 now, now_gtod; if (unlikely(!sched_clock_running)) @@ -269,29 +202,14 @@ void sched_clock_tick(void) now = sched_clock(); __raw_spin_lock(&scd->lock); - __update_sched_clock(scd, now, NULL); + __update_sched_clock(scd, now); /* * update tick_gtod after __update_sched_clock() because that will * already observe 1 new jiffy; adding a new tick_gtod to that would * increase the clock 2 jiffies. */ - delta_gtod = now_gtod - scd->tick_gtod; - delta_raw = now - scd->tick_raw; - - if ((long)delta_raw > 0) { - mult = delta_gtod << MULTI_SHIFT; - do_div(mult, delta_raw); - scd->multi = mult; - if (scd->multi > MAX_MULTI) - scd->multi = MAX_MULTI; - else if (scd->multi < MIN_MULTI) - scd->multi = MIN_MULTI; - } else - scd->multi = 1 << MULTI_SHIFT; - scd->tick_raw = now; scd->tick_gtod = now_gtod; - scd->tick_jiffies = now_jiffies; __raw_spin_unlock(&scd->lock); } @@ -321,7 +239,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) __raw_spin_lock(&scd->lock); scd->prev_raw = now; scd->clock += delta_ns; - scd->multi = 1 << MULTI_SHIFT; __raw_spin_unlock(&scd->lock); touch_softlockup_watchdog(); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 825b4c00fe44..f5da526424a9 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle) ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; rcu_enter_nohz(); - sched_clock_tick_stop(cpu); } /* @@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void) select_nohz_load_balancer(0); now = ktime_get(); tick_do_update_jiffies64(now); - sched_clock_tick_start(cpu); cpu_clear(cpu, nohz_cpu_mask); /* -- cgit v1.2.2 From 50526968e99afbca34924abcb04658b6dd5c5ea5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 30 Jul 2008 09:39:48 +0200 Subject: sched clock: clean up sched_clock_cpu() - simplify the remote clock rebasing Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Mike Galbraith --- kernel/sched_clock.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 9a7844158ae8..b96559cb96a5 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -169,11 +169,8 @@ u64 sched_clock_cpu(int cpu) 
lock_double_clock(scd, my_scd); - now -= my_scd->tick_raw; - now += scd->tick_raw; - - now += my_scd->tick_gtod; - now -= scd->tick_gtod; + now += scd->tick_raw - my_scd->tick_raw; + now += my_scd->tick_gtod - scd->tick_gtod; __raw_spin_unlock(&my_scd->lock); } else { -- cgit v1.2.2 From 18e4e36c66d6edbdefc639692206cdf01e468713 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 30 Jul 2008 10:13:35 +0200 Subject: sched: eliminate scd->prev_raw eliminate prev_raw and use tick_raw instead. It's enough to base the current time on the scheduler tick timestamp alone - the monotonicity and maximum checks will prevent any damage. Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Mike Galbraith --- kernel/sched_clock.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index b96559cb96a5..4b8474c966dc 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -53,7 +53,6 @@ struct sched_clock_data { raw_spinlock_t lock; unsigned long tick_jiffies; - u64 prev_raw; u64 tick_raw; u64 tick_gtod; u64 clock; @@ -84,7 +83,6 @@ void sched_clock_init(void) scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; scd->tick_jiffies = now_jiffies; - scd->prev_raw = 0; scd->tick_raw = 0; scd->tick_gtod = ktime_now; scd->clock = ktime_now; @@ -105,7 +103,7 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now) long delta_jiffies = now_jiffies - scd->tick_jiffies; u64 clock = scd->clock; u64 min_clock, max_clock; - s64 delta = now - scd->prev_raw; + s64 delta = now - scd->tick_raw; WARN_ON_ONCE(!irqs_disabled()); min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC; @@ -130,7 +128,6 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now) if (unlikely(clock < min_clock)) clock = min_clock; - scd->prev_raw = now; scd->tick_jiffies = now_jiffies; scd->clock = clock; } @@ -234,7 +231,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) * rq clock: */ __raw_spin_lock(&scd->lock); - scd->prev_raw = now; scd->clock += delta_ns; __raw_spin_unlock(&scd->lock); -- cgit v1.2.2 From 56b906126d33904d4d67615d0d5b95dbdf1f27ca Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 30 Jul 2008 10:15:55 +0200 Subject: sched clock: simplify __update_sched_clock() - return the current clock instead of letting callers fetch it from scd->clock Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Mike Galbraith --- kernel/sched_clock.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 4b8474c966dc..857a1291fd23 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -97,7 +97,7 @@ void sched_clock_init(void) * - filter out backward motion * - use jiffies to generate a min,max window to clip the raw values */ -static void __update_sched_clock(struct sched_clock_data *scd, u64 now) +static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) { unsigned long now_jiffies = jiffies; long delta_jiffies = now_jiffies - scd->tick_jiffies; @@ -130,6 +130,8 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now) scd->tick_jiffies = now_jiffies; scd->clock = clock; + + return clock; } static void lock_double_clock(struct sched_clock_data *data1, @@ -174,8 +176,7 @@ u64 sched_clock_cpu(int cpu) __raw_spin_lock(&scd->lock); } - __update_sched_clock(scd, now); - clock = scd->clock; + clock = __update_sched_clock(scd, now); __raw_spin_unlock(&scd->lock); -- cgit v1.2.2 
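[Aside, not part of the series: the net effect of the three sched_clock cleanups above can be seen in a simplified, stand-alone sketch of the clamping logic that __update_sched_clock() ends up with. The raw sched_clock() delta advances the per-CPU clock, but the result is clipped to a window derived from the GTOD timestamp of the last tick plus the elapsed jiffies, and backward raw motion only inches the clock forward. All names, types and the TICK_NSEC value below are illustrative, not the kernel's; tick_raw/tick_gtod are refreshed by the tick path, which the sketch omits.]

#include <stdint.h>

#define TICK_NSEC 1000000ULL               /* example value; assumes HZ=1000 */

struct clock_state {
	uint64_t tick_raw;                 /* raw sched_clock() at the last tick */
	uint64_t tick_gtod;                /* GTOD timestamp at the last tick    */
	uint64_t tick_jiffies;             /* jiffies at the last tick           */
	uint64_t clock;                    /* filtered, monotonic per-CPU clock  */
};

/*
 * Advance s->clock for a new raw timestamp @now, clipping the result to
 * the window [tick_gtod + elapsed jiffies, that value + one tick].
 */
static uint64_t update_clock(struct clock_state *s, uint64_t now,
			     uint64_t now_jiffies)
{
	uint64_t min_clock = s->tick_gtod +
			     (now_jiffies - s->tick_jiffies) * TICK_NSEC;
	uint64_t max_clock = min_clock + TICK_NSEC;
	int64_t delta = (int64_t)(now - s->tick_raw);
	uint64_t clock = s->clock;

	if (delta < 0)
		clock++;                                /* raw clock went backwards */
	else if (clock + delta > max_clock)
		clock = clock < max_clock ? max_clock : clock + 1;
	else
		clock += delta;                         /* normal case: add raw delta */

	if (clock < min_clock)
		clock = min_clock;                      /* never fall behind GTOD */

	s->tick_jiffies = now_jiffies;
	s->clock = clock;
	return clock;                   /* callers now use the return value */
}
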
From 4a273f209cc95d148f79b4c96d3d03997b44ffda Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 30 Jul 2008 10:22:07 +0200 Subject: sched clock: couple local and remote clocks When taking the time of a remote CPU, use the opportunity to couple (sync) the clocks to each other. (in a monotonic way) Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Mike Galbraith --- kernel/sched_clock.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 857a1291fd23..074edc989379 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -149,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1, u64 sched_clock_cpu(int cpu) { struct sched_clock_data *scd = cpu_sdc(cpu); - u64 now, clock; + u64 now, clock, this_clock, remote_clock; if (unlikely(!sched_clock_running)) return 0ull; @@ -158,26 +158,36 @@ u64 sched_clock_cpu(int cpu) now = sched_clock(); if (cpu != raw_smp_processor_id()) { - /* - * in order to update a remote cpu's clock based on our - * unstable raw time rebase it against: - * tick_raw (offset between raw counters) - * tick_gotd (tick offset between cpus) - */ struct sched_clock_data *my_scd = this_scd(); lock_double_clock(scd, my_scd); - now += scd->tick_raw - my_scd->tick_raw; - now += my_scd->tick_gtod - scd->tick_gtod; + this_clock = __update_sched_clock(my_scd, now); + remote_clock = scd->clock; + + /* + * Use the opportunity that we have both locks + * taken to couple the two clocks: we take the + * larger time as the latest time for both + * runqueues. (this creates monotonic movement) + */ + if (likely(remote_clock < this_clock)) { + clock = this_clock; + scd->clock = clock; + } else { + /* + * Should be rare, but possible: + */ + clock = remote_clock; + my_scd->clock = remote_clock; + } __raw_spin_unlock(&my_scd->lock); } else { __raw_spin_lock(&scd->lock); + clock = __update_sched_clock(scd, now); } - clock = __update_sched_clock(scd, now); - __raw_spin_unlock(&scd->lock); return clock; @@ -223,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); void sched_clock_idle_wakeup_event(u64 delta_ns) { struct sched_clock_data *scd = this_scd(); - u64 now = sched_clock(); /* * Override the previous timestamp and ignore all -- cgit v1.2.2 From cb3952bf7853667a1cb3515e67f27e67f0fce9e8 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Wed, 30 Jul 2008 14:46:50 +0400 Subject: DMA: make dma-coherent.c documentation kdoc-friendly Spotted by Randy. Acked-by: Randy Dunlap Signed-off-by: Dmitry Baryshkov Signed-off-by: Jesse Barnes --- kernel/dma-coherent.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c index 91e96950cd52..c1d4d5b4c61c 100644 --- a/kernel/dma-coherent.c +++ b/kernel/dma-coherent.c @@ -92,7 +92,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev, EXPORT_SYMBOL(dma_mark_declared_memory_occupied); /** - * Try to allocate memory from the per-device coherent area. + * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area * * @dev: device from which we allocate memory * @size: size of requested memory area @@ -100,11 +100,11 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied); * @ret: This pointer will be filled with the virtual address * to allocated area. 
* - * This function should be only called from per-arch %dma_alloc_coherent() + * This function should be only called from per-arch dma_alloc_coherent() * to support allocation from per-device coherent memory pools. * * Returns 0 if dma_alloc_coherent should continue with allocating from - * generic memory areas, or !0 if dma_alloc_coherent should return %ret. + * generic memory areas, or !0 if dma_alloc_coherent should return @ret. */ int dma_alloc_from_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle, void **ret) @@ -126,7 +126,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, } /** - * Try to free the memory allocated from per-device coherent memory pool. + * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool * @dev: device from which the memory was allocated * @order: the order of pages allocated * @vaddr: virtual address of allocated pages @@ -135,7 +135,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, * coherent memory pool and if so, releases that memory. * * Returns 1 if we correctly released the memory, or 0 if - * %dma_release_coherent() should proceed with releasing memory from + * dma_release_coherent() should proceed with releasing memory from * generic pools. */ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) -- cgit v1.2.2 From c1955a3d4762e7a9bf84035eb3c4886a900f0d15 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 11 Aug 2008 08:59:03 +0200 Subject: sched_clock: delay using sched_clock() Some arch's can't handle sched_clock() being called too early - delay this until sched_clock_init() has been called. Reported-by: Bill Gatliff Signed-off-by: Peter Zijlstra Tested-by: Nishanth Aravamudan CC: Russell King - ARM Linux Signed-off-by: Ingo Molnar --- kernel/sched_clock.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 074edc989379..204991a0bfa7 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void) return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); } +static __read_mostly int sched_clock_running; + #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK struct sched_clock_data { @@ -70,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) return &per_cpu(sched_clock_data, cpu); } -static __read_mostly int sched_clock_running; - void sched_clock_init(void) { u64 ktime_now = ktime_to_ns(ktime_get()); @@ -248,6 +248,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) } EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); +#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ + +void sched_clock_init(void) +{ + sched_clock_running = 1; +} + +u64 sched_clock_cpu(int cpu) +{ + if (unlikely(!sched_clock_running)) + return 0; + + return sched_clock(); +} + #endif unsigned long long cpu_clock(int cpu) -- cgit v1.2.2 From 77ae651347bdd46830da8b28b1efc5e4a9d7cbd0 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Mon, 11 Aug 2008 13:32:02 +0200 Subject: sched: fix mysql+oltp regression Defer commit 6d299f1b53b84e2665f402d9bcc494800aba6386 to the next release. Testing of the tip/sched/clock tree revealed a mysql+oltp regression which bisection eventually traced back to this commit in mainline. Pertinent test results: Three run sysbench averages, throughput units in read/write requests/sec. 
clients 1 2 4 8 16 32 64 6e0534f 9646 17876 34774 33868 32230 30767 29441 2.6.26.1 9112 17936 34652 33383 31929 30665 29232 6d299f1 9112 14637 28370 33339 32038 30762 29204 Note: subsequent commits hide the majority of this regression until you apply the clock fixes, at which time it reemerges at full magnitude. We cannot see anything bad about the change itself so we defer it to the next release until this problem is fully analysed. Signed-off-by: Mike Galbraith Acked-by: Peter Zijlstra Cc: Gregory Haskins Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 0fe94ea43f32..fb8994c6d4bb 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1442,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) struct task_struct *p = NULL; struct sched_entity *se; - while (next != &cfs_rq->tasks) { + if (next == &cfs_rq->tasks) + return NULL; + + /* Skip over entities that are not tasks */ + do { se = list_entry(next, struct sched_entity, group_node); next = next->next; + } while (next != &cfs_rq->tasks && !entity_is_task(se)); - /* Skip over entities that are not tasks */ - if (entity_is_task(se)) { - p = task_of(se); - break; - } - } + if (next == &cfs_rq->tasks) + return NULL; cfs_rq->balance_iterator = next; + + if (entity_is_task(se)) + p = task_of(se); + return p; } -- cgit v1.2.2 From 279ef6bbb8308488398c8f33b04c760148428378 Mon Sep 17 00:00:00 2001 From: Dmitry Adamushko Date: Wed, 30 Jul 2008 12:34:04 +0200 Subject: sched, cpu hotplug: fix set_cpus_allowed() use in hotplug callbacks Mark Langsdorf reported: > One of my co-workers noticed that the powernow-k8 > driver no longer restarts when a CPU core is > hot-disabled and then hot-enabled on AMD quad-core > systems. > > The following comands work fine on 2.6.26 and fail > on 2.6.27-rc1: > > echo 0 > /sys/devices/system/cpu/cpu3/online > echo 1 > /sys/devices/system/cpu/cpu3/online > find /sys -name cpufreq > > For 2.6.26, the find will return a cpufreq > directory for each processor. In 2.6.27-rc1, > the cpu3 directory is missing. > > After digging through the code, the following > logic is failing when the core is hot-enabled > at runtime. The code works during the boot > sequence. > > cpumask_t = current->cpus_allowed; > set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); > if (smp_processor_id() != cpu) > return -ENODEV; So set the CPU active before calling the CPU_ONLINE notifier chain, there are a handful of notifiers that use set_cpus_allowed(). This fix also solves the problem with x86-microcode. I've sent alternative patches for microcode, but as this "rely on set_cpus_allowed_ptr() being workable in cpu-hotplug(CPU_ONLINE, ...)" assumption seems to be more broad than what we thought, perhaps this fix should be applied. With this patch we define that by the moment CPU_ONLINE is being sent, a 'cpu' is online and ready for tasks to be migrated onto it. 
Signed-off-by: Dmitry Adamushko Reported-by: Mark Langsdorf Tested-by: Mark Langsdorf Signed-off-by: Ingo Molnar --- kernel/cpu.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index e202a68d1cc1..c977c339f559 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -349,6 +349,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) goto out_notify; BUG_ON(!cpu_online(cpu)); + cpu_set(cpu, cpu_active_map); + /* Now call notifier in preparation. */ raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); @@ -383,9 +385,6 @@ int __cpuinit cpu_up(unsigned int cpu) err = _cpu_up(cpu, 0); - if (cpu_online(cpu)) - cpu_set(cpu, cpu_active_map); - out: cpu_maps_update_done(); return err; -- cgit v1.2.2 From 59f9415ffb9759e950d775f4c400f747b332cc02 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Wed, 30 Jul 2008 12:49:02 -0700 Subject: modules: extend initcall_debug functionality to the module loader The kernel has this really nice facility where if you put "initcall_debug" on the kernel commandline, it'll print which function it's going to execute just before calling an initcall, and then after the call completes it will 1) print if it had an error code 2) checks for a few simple bugs (like leaving irqs off) and 3) print how long the init call took in milliseconds. While trying to optimize the boot speed of my laptop, I have been loving number 3 to figure out what to optimize... ... and then I wished that the same thing was done for module loading. This patch makes the module loader use this exact same functionality; it's a logical extension in my view (since modules are just sort of late binding initcalls anyway) and so far I've found it quite useful in finding where things are too slow in my boot. Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Rusty Russell --- kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 61d212120df4..08864d257eb0 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2288,7 +2288,7 @@ sys_init_module(void __user *umod, /* Start the module */ if (mod->init != NULL) - ret = mod->init(); + ret = do_one_initcall(mod->init); if (ret < 0) { /* Init routine failed: abort. Try to protect us from buggy refcounters. */ -- cgit v1.2.2 From ed6d68763b8b589c0ae9d231cbd72bd01f6685c5 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 31 Jul 2008 10:31:02 +0800 Subject: stop_machine: remove unused variable Signed-off-by: Li Zefan Signed-off-by: Rusty Russell --- kernel/stop_machine.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index e446c7c7d6a9..af3c7cea258b 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -65,7 +65,6 @@ static void ack_state(void) static int stop_cpu(struct stop_machine_data *smdata) { enum stopmachine_state curstate = STOPMACHINE_NONE; - int uninitialized_var(ret); /* Simple state machine */ do { -- cgit v1.2.2 From 3ee1062b4ee82a56294808a065b64f4bc6781a56 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 12 Aug 2008 15:08:40 -0700 Subject: cpu hotplug: s390 doesn't support additional_cpus anymore. s390 doesn't support the additional_cpus kernel parameter anymore since a long time. So we better update the code and documentation to reflect that. 
Cc: Martin Schwidefsky Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index c977c339f559..f17e9854c246 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -369,7 +369,7 @@ int __cpuinit cpu_up(unsigned int cpu) if (!cpu_isset(cpu, cpu_possible_map)) { printk(KERN_ERR "can't online cpu %d because it is not " "configured as may-hotadd at boot time\n", cpu); -#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390) +#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) printk(KERN_ERR "please check additional_cpus= boot " "parameter\n"); #endif -- cgit v1.2.2 From f18e439d1035d059534d261c414af33f89aee89a Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 12 Aug 2008 15:09:03 -0700 Subject: genirq: switch /proc/irq/*/smp_affinity et al to seqfiles Switch /proc/irq/*/smp_affinity , /proc/irq/default_smp_affinity to seq_files. cat(1) reads with 1024 chunks by default, with high enough NR_CPUS, there will be -EINVAL. As side effect, there are now two less users of the ->read_proc interface. Signed-off-by: Alexey Dobriyan Cc: Paul Jackson Cc: Mike Travis Cc: Al Viro Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/proc.c | 96 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 51 insertions(+), 45 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 6c6d35d68ee9..a09dd29c2fd7 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -8,6 +8,7 @@ #include #include +#include #include #include "internals.h" @@ -16,23 +17,18 @@ static struct proc_dir_entry *root_irq_dir; #ifdef CONFIG_SMP -static int irq_affinity_read_proc(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int irq_affinity_proc_show(struct seq_file *m, void *v) { - struct irq_desc *desc = irq_desc + (long)data; + struct irq_desc *desc = irq_desc + (long)m->private; cpumask_t *mask = &desc->affinity; - int len; #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PENDING) mask = &desc->pending_mask; #endif - len = cpumask_scnprintf(page, count, *mask); - - if (count - len < 2) - return -EINVAL; - len += sprintf(page + len, "\n"); - return len; + seq_cpumask(m, mask); + seq_putc(m, '\n'); + return 0; } #ifndef is_affinity_mask_valid @@ -40,11 +36,12 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off, #endif int no_irq_affinity; -static int irq_affinity_write_proc(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static ssize_t irq_affinity_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) { - unsigned int irq = (int)(long)data, full_count = count, err; + unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; cpumask_t new_value; + int err; if (!irq_desc[irq].chip->set_affinity || no_irq_affinity || irq_balancing_disabled(irq)) @@ -65,28 +62,38 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer, if (!cpus_intersects(new_value, cpu_online_map)) /* Special case for empty set - allow the architecture code to set default SMP affinity. */ - return irq_select_affinity(irq) ? -EINVAL : full_count; + return irq_select_affinity(irq) ? 
-EINVAL : count; irq_set_affinity(irq, new_value); - return full_count; + return count; } -static int default_affinity_read(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int irq_affinity_proc_open(struct inode *inode, struct file *file) { - int len = cpumask_scnprintf(page, count, irq_default_affinity); - if (count - len < 2) - return -EINVAL; - len += sprintf(page + len, "\n"); - return len; + return single_open(file, irq_affinity_proc_show, PDE(inode)->data); } -static int default_affinity_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) +static const struct file_operations irq_affinity_proc_fops = { + .open = irq_affinity_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = irq_affinity_proc_write, +}; + +static int default_affinity_show(struct seq_file *m, void *v) +{ + seq_cpumask(m, &irq_default_affinity); + seq_putc(m, '\n'); + return 0; +} + +static ssize_t default_affinity_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) { - unsigned int full_count = count, err; cpumask_t new_value; + int err; err = cpumask_parse_user(buffer, count, new_value); if (err) @@ -105,8 +112,21 @@ static int default_affinity_write(struct file *file, const char __user *buffer, irq_default_affinity = new_value; - return full_count; + return count; } + +static int default_affinity_open(struct inode *inode, struct file *file) +{ + return single_open(file, default_affinity_show, NULL); +} + +static const struct file_operations default_affinity_proc_fops = { + .open = default_affinity_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = default_affinity_write, +}; #endif static int irq_spurious_read(char *page, char **start, off_t off, @@ -178,16 +198,9 @@ void register_irq_proc(unsigned int irq) irq_desc[irq].dir = proc_mkdir(name, root_irq_dir); #ifdef CONFIG_SMP - { - /* create /proc/irq//smp_affinity */ - entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir); - - if (entry) { - entry->data = (void *)(long)irq; - entry->read_proc = irq_affinity_read_proc; - entry->write_proc = irq_affinity_write_proc; - } - } + /* create /proc/irq//smp_affinity */ + proc_create_data("smp_affinity", 0600, irq_desc[irq].dir, + &irq_affinity_proc_fops, (void *)(long)irq); #endif entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir); @@ -208,15 +221,8 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action) void register_default_affinity_proc(void) { #ifdef CONFIG_SMP - struct proc_dir_entry *entry; - - /* create /proc/irq/default_smp_affinity */ - entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir); - if (entry) { - entry->data = NULL; - entry->read_proc = default_affinity_read; - entry->write_proc = default_affinity_write; - } + proc_create("irq/default_smp_affinity", 0600, NULL, + &default_affinity_proc_fops); #endif } -- cgit v1.2.2
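
[Aside, not part of the patch above: for readers unfamiliar with the seq_file interface that the genirq conversion adopts, a minimal stand-alone sketch of the single_open pattern follows. The entry name, variable and function names are made up for the example; it targets the same 2.6.27-era procfs API (proc_create plus file_operations) that the patch uses.]

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

static unsigned long example_value;

static int example_show(struct seq_file *m, void *v)
{
	/* seq_printf() replaces the old hand-rolled read_proc buffer code */
	seq_printf(m, "%lu\n", example_value);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char kbuf[32];

	if (count >= sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	kbuf[count] = '\0';
	example_value = simple_strtoul(kbuf, NULL, 0);
	return count;
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,	/* seq_file handles arbitrary read sizes */
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= example_write,
};

static int __init example_init(void)
{
	proc_create("example_value", 0600, NULL, &example_proc_fops);
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example_value", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");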