From 9f31252cb61d5bc641cdef8a069a2ca5a77855f2 Mon Sep 17 00:00:00 2001
From: Eric Sesterhenn
Date: Sun, 2 Apr 2006 13:45:55 +0200
Subject: BUG_ON() Conversion in kernel/signal.c

This changes if() BUG(); constructs to BUG_ON(), which is cleaner,
contains unlikely() and can be better optimized away.

Signed-off-by: Eric Sesterhenn
Signed-off-by: Adrian Bunk
---
 kernel/timer.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'kernel/timer.c')

diff --git a/kernel/timer.c b/kernel/timer.c
index 6b812c04737b..c3a874f1393c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1479,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
 	unsigned long flags;
 
 	/* Sanity check */
-	if (ti->frequency == 0 || ti->mask == 0)
-		BUG();
+	BUG_ON(ti->frequency == 0 || ti->mask == 0);
 
 	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
 	spin_lock(&time_interpolator_lock);
-- 
cgit v1.2.2


From b20367a6c2a0cd937cb1f0a8cf848f1402fef99c Mon Sep 17 00:00:00 2001
From: Jordan Hargrave
Date: Fri, 7 Apr 2006 19:50:18 +0200
Subject: [PATCH] x86_64: Fix drift with HPET timer enabled

If the HPET timer is enabled, the clock can drift by ~3 seconds a day.
This is due to the HPET timer not being initialized with the correct
setting (still using the PIT count).  If HZ changes, this drift can
become even more pronounced.

The HPET patch initializes tick_nsec with the correct tick_nsec setting
for the HPET timer.

Vojtech comments: "It's not entirely correct (it assumes the HPET ticks
totally exactly), but it's significantly better than assuming the PIT
error there."

Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 kernel/timer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/timer.c')

diff --git a/kernel/timer.c b/kernel/timer.c
index c3a874f1393c..471ab8710b8f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1455,7 +1455,7 @@ static void time_interpolator_update(long delta_nsec)
 	 */
 	if (jiffies % INTERPOLATOR_ADJUST == 0)
 	{
-		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
 			time_interpolator->nsec_per_cyc--;
 		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
 			time_interpolator->nsec_per_cyc++;
-- 
cgit v1.2.2


From ba6edfcd1708da2e665f14eee76e87f39448ec40 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Mon, 10 Apr 2006 22:53:58 -0700
Subject: [PATCH] timer initialisation fix

We need the boot CPU's tvec_bases[] entry to be initialised super-early
in boot, for early_serial_setup().  That runs within setup_arch(),
before even per-cpu areas are initialised.

The patch changes tvec_bases to use compile-time initialisation, and
adds a separate array `tvec_base_done' to keep track of which CPU has
had its tvec_bases[] entry initialised (because we can no longer use
the zeroness of that tvec_bases[] entry to determine whether it has
been initialised).

Thanks to Eugene Surovegin for diagnosing this.

Cc: Eugene Surovegin
Cc: Jan Beulich
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/timer.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

(limited to 'kernel/timer.c')

diff --git a/kernel/timer.c b/kernel/timer.c
index 471ab8710b8f..883773788836 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
 } ____cacheline_aligned_in_smp;
 
 typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
 tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
 
 static inline void set_running_timer(tvec_base_t *base,
 					struct timer_list *timer)
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
+	static char __devinitdata tvec_base_done[NR_CPUS];
 
-	base = per_cpu(tvec_bases, cpu);
-	if (!base) {
+	if (!tvec_base_done[cpu]) {
 		static char boot_done;
 
-		/*
-		 * Cannot do allocation in init_timers as that runs before the
-		 * allocator initializes (and would waste memory if there are
-		 * more possible CPUs than will ever be installed/brought up).
-		 */
 		if (boot_done) {
+			/*
+			 * The APs use this path later in boot
+			 */
 			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
 			memset(base, 0, sizeof(*base));
+			per_cpu(tvec_bases, cpu) = base;
 		} else {
-			base = &boot_tvec_bases;
+			/*
+			 * This is for the boot CPU - we use compile-time
+			 * static initialisation because per-cpu memory isn't
+			 * ready yet and because the memory allocators are not
+			 * initialised either.
+			 */
 			boot_done = 1;
+			base = &boot_tvec_bases;
 		}
-		per_cpu(tvec_bases, cpu) = base;
+		tvec_base_done[cpu] = 1;
+	} else {
+		base = per_cpu(tvec_bases, cpu);
 	}
+
 	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
-- 
cgit v1.2.2
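
The last patch above is easiest to read as a small pattern: keep one
statically initialised "boot" instance that is valid before any allocator
runs, make every CPU's pointer default to it, and record per-CPU
initialisation in a separate done[] flag array because a NULL check on the
pointer no longer works.  The following is only a minimal user-space C
sketch of that pattern under stated assumptions: plain arrays stand in for
the kernel's per-CPU variables, malloc() stands in for kmalloc_node(), and
identifiers such as struct timer_base, cpu_bases, base_done and
init_timer_base are invented for the example, not kernel symbols.

/*
 * Sketch of the boot-time initialisation pattern from the patch above.
 * Plain arrays replace per-CPU variables, malloc() replaces kmalloc_node();
 * all identifiers are illustrative, not kernel symbols.
 */
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

struct timer_base {
	int dummy;	/* the real tvec_base_t holds a lock and timer wheels */
};

/* compile-time initialised instance, usable before any allocator is up */
static struct timer_base boot_base;

/* the boot CPU (CPU 0 here) is covered by static initialisation */
static struct timer_base *cpu_bases[NR_CPUS] = { &boot_base };

/* cpu_bases[cpu] can no longer be tested for NULL, so track init separately */
static char base_done[NR_CPUS];

static int init_timer_base(int cpu)
{
	static char boot_done;
	struct timer_base *base;

	if (!base_done[cpu]) {
		if (boot_done) {
			/* secondary CPUs: allocators work now, give each CPU its own base */
			base = malloc(sizeof(*base));
			if (!base)
				return -1;
			memset(base, 0, sizeof(*base));
			cpu_bases[cpu] = base;
		} else {
			/* boot CPU: keep the statically allocated instance */
			boot_done = 1;
			base = &boot_base;
		}
		base_done[cpu] = 1;
	} else {
		base = cpu_bases[cpu];
	}

	/* per-base setup (lock init, list heads) would follow here */
	(void)base;
	return 0;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (init_timer_base(cpu) != 0)
			return 1;
	return 0;
}

The branch split mirrors the kernel change: the else branch is what lets the
boot CPU take this path before per-CPU memory and the memory allocators are
initialised, while the secondary CPUs (the "APs" in the patch comment) go
through the allocating branch later in boot.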