author     Thomas Gleixner <tglx@linutronix.de>    2016-07-04 05:50:28 -0400
committer  Ingo Molnar <mingo@kernel.org>          2016-07-07 04:35:08 -0400
commit     494af3ed7848de08640d98ee5aff57a45c137c3c
tree       ef2f76685775495e290839eeab70c72a767688d4  /kernel/time/timer.c
parent     15dba1e37b5cfd7fab9bc84e0f05f35c918f4eef
timers: Give a few structs and members proper names
Some of the names in the internal implementation of the timer code are no
longer correct and others are simply too long to type. Clean it up before
we switch the wheel implementation over to the new scheme.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Chris Mason <clm@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: George Spelvin <linux@sciencehorizons.net>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160704094341.948752516@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
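In short, this is a mechanical rename; the hunks below change the declarations roughly as in the following sketch (only the names touched by this patch are shown, the remaining struct members and all function bodies are left as they were):

    /* Rename summary, taken from the hunks below:
     *   struct tvec_base               ->  struct timer_base
     *   tvec_base.timer_jiffies        ->  timer_base.clk
     *   per-cpu variable tvec_bases    ->  per-cpu variable timer_bases
     */
    struct timer_base {
            spinlock_t              lock;
            struct timer_list       *running_timer;
            unsigned long           clk;            /* was: timer_jiffies */
            unsigned long           next_timer;
            unsigned long           active_timers;
            unsigned long           all_timers;
            /* further members are unchanged by this patch */
    } ____cacheline_aligned;

    static DEFINE_PER_CPU(struct timer_base, timer_bases); /* was: tvec_bases */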
Diffstat (limited to 'kernel/time/timer.c')
-rw-r--r--  kernel/time/timer.c  118
1 file changed, 59 insertions, 59 deletions
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ba49c1cf80f5..f259a3ef4577 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -77,10 +77,10 @@ struct tvec_root {
 	struct hlist_head vec[TVR_SIZE];
 };
 
-struct tvec_base {
+struct timer_base {
 	spinlock_t lock;
 	struct timer_list *running_timer;
-	unsigned long timer_jiffies;
+	unsigned long clk;
 	unsigned long next_timer;
 	unsigned long active_timers;
 	unsigned long all_timers;
@@ -95,7 +95,7 @@ struct tvec_base {
 } ____cacheline_aligned;
 
 
-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +106,15 @@ void timers_update_migration(bool update_nohz)
 	unsigned int cpu;
 
 	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(tvec_bases.migration_enabled) == on)
+	if (this_cpu_read(timer_bases.migration_enabled) == on)
 		return;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(tvec_bases.migration_enabled, cpu) = on;
+		per_cpu(timer_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
 		if (!update_nohz)
 			continue;
-		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(timer_bases.nohz_active, cpu) = true;
 		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
@@ -134,18 +134,18 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&tvec_bases);
-	return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
+		return this_cpu_ptr(&timer_bases);
+	return per_cpu_ptr(&timer_bases, get_nohz_timer_target());
 }
 #else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
+static inline struct timer_base *get_target_base(struct timer_base *base,
 						int pinned)
 {
-	return this_cpu_ptr(&tvec_bases);
+	return this_cpu_ptr(&timer_bases);
 }
 #endif
 
@@ -371,10 +371,10 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	unsigned long expires = timer->expires;
-	unsigned long idx = expires - base->timer_jiffies;
+	unsigned long idx = expires - base->clk;
 	struct hlist_head *vec;
 
 	if (idx < TVR_SIZE) {
@@ -394,7 +394,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		 * Can happen if you add a timer with expires == jiffies,
 		 * or you set a timer to go off in the past
 		 */
-		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+		vec = base->tv1.vec + (base->clk & TVR_MASK);
 	} else {
 		int i;
 		/* If the timeout is larger than MAX_TVAL (on 64-bit
@@ -403,7 +403,7 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		 */
 		if (idx > MAX_TVAL) {
 			idx = MAX_TVAL;
-			expires = idx + base->timer_jiffies;
+			expires = idx + base->clk;
 		}
 		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 		vec = base->tv5.vec + i;
@@ -412,11 +412,11 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 	hlist_add_head(&timer->entry, vec);
 }
 
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
 	/* Advance base->jiffies, if the base is empty */
 	if (!base->all_timers++)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 
 	__internal_add_timer(base, timer);
 	/*
@@ -707,7 +707,7 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 }
 
 static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+detach_expired_timer(struct timer_list *timer, struct timer_base *base)
 {
 	detach_timer(timer, true);
 	if (!(timer->flags & TIMER_DEFERRABLE))
@@ -715,7 +715,7 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 	base->all_timers--;
 }
 
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
 			     bool clear_pending)
 {
 	if (!timer_pending(timer))
@@ -725,16 +725,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 	if (!(timer->flags & TIMER_DEFERRABLE)) {
 		base->active_timers--;
 		if (timer->expires == base->next_timer)
-			base->next_timer = base->timer_jiffies;
+			base->next_timer = base->clk;
 	}
 	/* If this was the last timer, advance base->jiffies */
 	if (!--base->all_timers)
-		base->timer_jiffies = jiffies;
+		base->clk = jiffies;
 	return 1;
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * We are using hashed locking: holding per_cpu(timer_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -744,16 +744,16 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
  * When the timer's base is locked and removed from the list, the
  * TIMER_MIGRATING flag is set, FIXME
  */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
+static struct timer_base *lock_timer_base(struct timer_list *timer,
 					unsigned long *flags)
 	__acquires(timer->base->lock)
 {
 	for (;;) {
 		u32 tf = timer->flags;
-		struct tvec_base *base;
+		struct timer_base *base;
 
 		if (!(tf & TIMER_MIGRATING)) {
-			base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+			base = per_cpu_ptr(&timer_bases, tf & TIMER_CPUMASK);
 			spin_lock_irqsave(&base->lock, *flags);
 			if (timer->flags == tf)
 				return base;
@@ -766,7 +766,7 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
-	struct tvec_base *base, *new_base;
+	struct timer_base *base, *new_base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -933,8 +933,8 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-	struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-	struct tvec_base *base;
+	struct timer_base *new_base = per_cpu_ptr(&timer_bases, cpu);
+	struct timer_base *base;
 	unsigned long flags;
 
 	timer_stats_timer_set_start_info(timer);
@@ -975,7 +975,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
  */
 int del_timer(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	struct tvec_base *base;
+	struct timer_base *base;
 	unsigned long flags;
 	int ret = -1;
 
@@ -1085,7 +1085,7 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
+static int cascade(struct timer_base *base, struct tvec *tv, int index)
 {
 	/* cascade all the timers from tv up one level */
 	struct timer_list *timer;
@@ -1149,7 +1149,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
 	}
 }
 
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
+#define INDEX(N) ((base->clk >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
  * __run_timers - run all expired timers (if any) on this CPU.
@@ -1158,23 +1158,23 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
  * This function cascades all vectors and executes all expired timer
  * vectors.
  */
-static inline void __run_timers(struct tvec_base *base)
+static inline void __run_timers(struct timer_base *base)
 {
 	struct timer_list *timer;
 
 	spin_lock_irq(&base->lock);
 
-	while (time_after_eq(jiffies, base->timer_jiffies)) {
+	while (time_after_eq(jiffies, base->clk)) {
 		struct hlist_head work_list;
 		struct hlist_head *head = &work_list;
 		int index;
 
 		if (!base->all_timers) {
-			base->timer_jiffies = jiffies;
+			base->clk = jiffies;
 			break;
 		}
 
-		index = base->timer_jiffies & TVR_MASK;
+		index = base->clk & TVR_MASK;
 
 		/*
 		 * Cascade timers:
@@ -1184,7 +1184,7 @@ static inline void __run_timers(struct tvec_base *base)
 				(!cascade(base, &base->tv3, INDEX(1))) &&
 					!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
-		++base->timer_jiffies;
+		++base->clk;
 		hlist_move_list(base->tv1.vec + index, head);
 		while (!hlist_empty(head)) {
 			void (*fn)(unsigned long);
@@ -1222,16 +1222,16 @@ static inline void __run_timers(struct tvec_base *base)
  * is used on S/390 to stop all activity when a CPU is idle.
  * This function needs to be called with interrupts disabled.
  */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
+static unsigned long __next_timer_interrupt(struct timer_base *base)
 {
-	unsigned long timer_jiffies = base->timer_jiffies;
-	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
+	unsigned long clk = base->clk;
+	unsigned long expires = clk + NEXT_TIMER_MAX_DELTA;
 	int index, slot, array, found = 0;
 	struct timer_list *nte;
 	struct tvec *varray[4];
 
 	/* Look for timer events in tv1. */
-	index = slot = timer_jiffies & TVR_MASK;
+	index = slot = clk & TVR_MASK;
 	do {
 		hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
 			if (nte->flags & TIMER_DEFERRABLE)
@@ -1250,8 +1250,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
 cascade:
 	/* Calculate the next cascade event */
 	if (index)
-		timer_jiffies += TVR_SIZE - index;
-	timer_jiffies >>= TVR_BITS;
+		clk += TVR_SIZE - index;
+	clk >>= TVR_BITS;
 
 	/* Check tv2-tv5. */
 	varray[0] = &base->tv2;
@@ -1262,7 +1262,7 @@ cascade:
 	for (array = 0; array < 4; array++) {
 		struct tvec *varp = varray[array];
 
-		index = slot = timer_jiffies & TVN_MASK;
+		index = slot = clk & TVN_MASK;
 		do {
 			hlist_for_each_entry(nte, varp->vec + slot, entry) {
 				if (nte->flags & TIMER_DEFERRABLE)
@@ -1286,8 +1286,8 @@ cascade:
 		} while (slot != index);
 
 		if (index)
-			timer_jiffies += TVN_SIZE - index;
-		timer_jiffies >>= TVN_BITS;
+			clk += TVN_SIZE - index;
+		clk >>= TVN_BITS;
 	}
 	return expires;
 }
@@ -1335,7 +1335,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 	u64 expires = KTIME_MAX;
 	unsigned long nextevt;
 
@@ -1348,7 +1348,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 
 	spin_lock(&base->lock);
 	if (base->active_timers) {
-		if (time_before_eq(base->next_timer, base->timer_jiffies))
+		if (time_before_eq(base->next_timer, base->clk))
 			base->next_timer = __next_timer_interrupt(base);
 		nextevt = base->next_timer;
 		if (time_before_eq(nextevt, basej))
@@ -1387,9 +1387,9 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+	struct timer_base *base = this_cpu_ptr(&timer_bases);
 
-	if (time_after_eq(jiffies, base->timer_jiffies))
+	if (time_after_eq(jiffies, base->clk))
 		__run_timers(base);
 }
 
@@ -1534,7 +1534,7 @@ signed long __sched schedule_timeout_idle(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_idle);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 {
 	struct timer_list *timer;
 	int cpu = new_base->cpu;
@@ -1550,13 +1550,13 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *he
 
 static void migrate_timers(int cpu)
 {
-	struct tvec_base *old_base;
-	struct tvec_base *new_base;
+	struct timer_base *old_base;
+	struct timer_base *new_base;
 	int i;
 
 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu_ptr(&tvec_bases, cpu);
-	new_base = get_cpu_ptr(&tvec_bases);
+	old_base = per_cpu_ptr(&timer_bases, cpu);
+	new_base = get_cpu_ptr(&timer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
@@ -1580,7 +1580,7 @@ static void migrate_timers(int cpu)
 
 	spin_unlock(&old_base->lock);
 	spin_unlock_irq(&new_base->lock);
-	put_cpu_ptr(&tvec_bases);
+	put_cpu_ptr(&timer_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1608,13 +1608,13 @@ static inline void timer_register_cpu_notifier(void) { }
 
 static void __init init_timer_cpu(int cpu)
 {
-	struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
+	struct timer_base *base = per_cpu_ptr(&timer_bases, cpu);
 
 	base->cpu = cpu;
 	spin_lock_init(&base->lock);
 
-	base->timer_jiffies = jiffies;
-	base->next_timer = base->timer_jiffies;
+	base->clk = jiffies;
+	base->next_timer = base->clk;
 }
 
 static void __init init_timer_cpus(void)