about summary refs log tree commit diff stats
path: root/kernel/timer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c  74
1 files changed, 38 insertions, 36 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index 2410c18dbeb1..ab189dd187cb 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -86,7 +86,8 @@ struct tvec_t_base_s {
86} ____cacheline_aligned_in_smp; 86} ____cacheline_aligned_in_smp;
87 87
88typedef struct tvec_t_base_s tvec_base_t; 88typedef struct tvec_t_base_s tvec_base_t;
89static DEFINE_PER_CPU(tvec_base_t, tvec_bases); 89static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
90static tvec_base_t boot_tvec_bases;
90 91
91static inline void set_running_timer(tvec_base_t *base, 92static inline void set_running_timer(tvec_base_t *base,
92 struct timer_list *timer) 93 struct timer_list *timer)
@@ -157,7 +158,7 @@ EXPORT_SYMBOL(__init_timer_base);
157void fastcall init_timer(struct timer_list *timer) 158void fastcall init_timer(struct timer_list *timer)
158{ 159{
159 timer->entry.next = NULL; 160 timer->entry.next = NULL;
160 timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base; 161 timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base;
161} 162}
162EXPORT_SYMBOL(init_timer); 163EXPORT_SYMBOL(init_timer);
163 164
@@ -218,7 +219,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
218 ret = 1; 219 ret = 1;
219 } 220 }
220 221
221 new_base = &__get_cpu_var(tvec_bases); 222 new_base = __get_cpu_var(tvec_bases);
222 223
223 if (base != &new_base->t_base) { 224 if (base != &new_base->t_base) {
224 /* 225 /*
@@ -258,7 +259,7 @@ EXPORT_SYMBOL(__mod_timer);
258 */ 259 */
259void add_timer_on(struct timer_list *timer, int cpu) 260void add_timer_on(struct timer_list *timer, int cpu)
260{ 261{
261 tvec_base_t *base = &per_cpu(tvec_bases, cpu); 262 tvec_base_t *base = per_cpu(tvec_bases, cpu);
262 unsigned long flags; 263 unsigned long flags;
263 264
264 BUG_ON(timer_pending(timer) || !timer->function); 265 BUG_ON(timer_pending(timer) || !timer->function);
@@ -504,7 +505,7 @@ unsigned long next_timer_interrupt(void)
504 } 505 }
505 hr_expires += jiffies; 506 hr_expires += jiffies;
506 507
507 base = &__get_cpu_var(tvec_bases); 508 base = __get_cpu_var(tvec_bases);
508 spin_lock(&base->t_base.lock); 509 spin_lock(&base->t_base.lock);
509 expires = base->timer_jiffies + (LONG_MAX >> 1); 510 expires = base->timer_jiffies + (LONG_MAX >> 1);
510 list = NULL; 511 list = NULL;
@@ -696,18 +697,9 @@ static void second_overflow(void)
696 697
697 /* 698 /*
698 * Compute the frequency estimate and additional phase adjustment due 699 * Compute the frequency estimate and additional phase adjustment due
699 * to frequency error for the next second. When the PPS signal is 700 * to frequency error for the next second.
700 * engaged, gnaw on the watchdog counter and update the frequency
701 * computed by the pll and the PPS signal.
702 */ 701 */
703 pps_valid++; 702 ltemp = time_freq;
704 if (pps_valid == PPS_VALID) { /* PPS signal lost */
705 pps_jitter = MAXTIME;
706 pps_stabil = MAXFREQ;
707 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
708 STA_PPSWANDER | STA_PPSERROR);
709 }
710 ltemp = time_freq + pps_freq;
711 time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE)); 703 time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));
712 704
713#if HZ == 100 705#if HZ == 100
@@ -901,7 +893,7 @@ EXPORT_SYMBOL(xtime_lock);
901 */ 893 */
902static void run_timer_softirq(struct softirq_action *h) 894static void run_timer_softirq(struct softirq_action *h)
903{ 895{
904 tvec_base_t *base = &__get_cpu_var(tvec_bases); 896 tvec_base_t *base = __get_cpu_var(tvec_bases);
905 897
906 hrtimer_run_queues(); 898 hrtimer_run_queues();
907 if (time_after_eq(jiffies, base->timer_jiffies)) 899 if (time_after_eq(jiffies, base->timer_jiffies))
@@ -914,6 +906,7 @@ static void run_timer_softirq(struct softirq_action *h)
914void run_local_timers(void) 906void run_local_timers(void)
915{ 907{
916 raise_softirq(TIMER_SOFTIRQ); 908 raise_softirq(TIMER_SOFTIRQ);
909 softlockup_tick();
917} 910}
918 911
919/* 912/*
@@ -944,7 +937,6 @@ void do_timer(struct pt_regs *regs)
944 /* prevent loading jiffies before storing new jiffies_64 value. */ 937 /* prevent loading jiffies before storing new jiffies_64 value. */
945 barrier(); 938 barrier();
946 update_times(); 939 update_times();
947 softlockup_tick(regs);
948} 940}
949 941
950#ifdef __ARCH_WANT_SYS_ALARM 942#ifdef __ARCH_WANT_SYS_ALARM
@@ -955,19 +947,7 @@ void do_timer(struct pt_regs *regs)
955 */ 947 */
956asmlinkage unsigned long sys_alarm(unsigned int seconds) 948asmlinkage unsigned long sys_alarm(unsigned int seconds)
957{ 949{
958 struct itimerval it_new, it_old; 950 return alarm_setitimer(seconds);
959 unsigned int oldalarm;
960
961 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
962 it_new.it_value.tv_sec = seconds;
963 it_new.it_value.tv_usec = 0;
964 do_setitimer(ITIMER_REAL, &it_new, &it_old);
965 oldalarm = it_old.it_value.tv_sec;
966 /* ehhh.. We can't return 0 if we have an alarm pending.. */
967 /* And we'd better return too much than too little anyway */
968 if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
969 oldalarm++;
970 return oldalarm;
971} 951}
972 952
973#endif 953#endif
@@ -1256,12 +1236,32 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1256 return 0; 1236 return 0;
1257} 1237}
1258 1238
1259static void __devinit init_timers_cpu(int cpu) 1239static int __devinit init_timers_cpu(int cpu)
1260{ 1240{
1261 int j; 1241 int j;
1262 tvec_base_t *base; 1242 tvec_base_t *base;
1263 1243
1264 base = &per_cpu(tvec_bases, cpu); 1244 base = per_cpu(tvec_bases, cpu);
1245 if (!base) {
1246 static char boot_done;
1247
1248 /*
1249 * Cannot do allocation in init_timers as that runs before the
1250 * allocator initializes (and would waste memory if there are
1251 * more possible CPUs than will ever be installed/brought up).
1252 */
1253 if (boot_done) {
1254 base = kmalloc_node(sizeof(*base), GFP_KERNEL,
1255 cpu_to_node(cpu));
1256 if (!base)
1257 return -ENOMEM;
1258 memset(base, 0, sizeof(*base));
1259 } else {
1260 base = &boot_tvec_bases;
1261 boot_done = 1;
1262 }
1263 per_cpu(tvec_bases, cpu) = base;
1264 }
1265 spin_lock_init(&base->t_base.lock); 1265 spin_lock_init(&base->t_base.lock);
1266 for (j = 0; j < TVN_SIZE; j++) { 1266 for (j = 0; j < TVN_SIZE; j++) {
1267 INIT_LIST_HEAD(base->tv5.vec + j); 1267 INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1273,6 +1273,7 @@ static void __devinit init_timers_cpu(int cpu)
1273 INIT_LIST_HEAD(base->tv1.vec + j); 1273 INIT_LIST_HEAD(base->tv1.vec + j);
1274 1274
1275 base->timer_jiffies = jiffies; 1275 base->timer_jiffies = jiffies;
1276 return 0;
1276} 1277}
1277 1278
1278#ifdef CONFIG_HOTPLUG_CPU 1279#ifdef CONFIG_HOTPLUG_CPU
@@ -1295,8 +1296,8 @@ static void __devinit migrate_timers(int cpu)
1295 int i; 1296 int i;
1296 1297
1297 BUG_ON(cpu_online(cpu)); 1298 BUG_ON(cpu_online(cpu));
1298 old_base = &per_cpu(tvec_bases, cpu); 1299 old_base = per_cpu(tvec_bases, cpu);
1299 new_base = &get_cpu_var(tvec_bases); 1300 new_base = get_cpu_var(tvec_bases);
1300 1301
1301 local_irq_disable(); 1302 local_irq_disable();
1302 spin_lock(&new_base->t_base.lock); 1303 spin_lock(&new_base->t_base.lock);
@@ -1326,7 +1327,8 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
1326 long cpu = (long)hcpu; 1327 long cpu = (long)hcpu;
1327 switch(action) { 1328 switch(action) {
1328 case CPU_UP_PREPARE: 1329 case CPU_UP_PREPARE:
1329 init_timers_cpu(cpu); 1330 if (init_timers_cpu(cpu) < 0)
1331 return NOTIFY_BAD;
1330 break; 1332 break;
1331#ifdef CONFIG_HOTPLUG_CPU 1333#ifdef CONFIG_HOTPLUG_CPU
1332 case CPU_DEAD: 1334 case CPU_DEAD: