Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c | 472
1 file changed, 415 insertions(+), 57 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 9e49deed468c..05809c2e2fd6 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+	timer->base = __raw_get_cpu_var(tvec_bases);
 }
 EXPORT_SYMBOL(init_timer);
 
@@ -374,6 +374,7 @@ int del_timer_sync(struct timer_list *timer)
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
 			return ret;
+		cpu_relax();
 	}
 }
 
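The hunk above adds cpu_relax() to del_timer_sync()'s retry loop, so a CPU spinning on a timer whose handler is still running yields pipeline resources instead of hammering the base lock. A minimal userspace sketch of the same retry shape; try_op() and the compiler-barrier "relax" are stand-ins, not kernel API:

/*
 * Sketch of the del_timer_sync() retry shape: keep retrying an
 * operation that can fail transiently, hinting to the CPU between
 * attempts.
 */
#include <stdio.h>

static int attempts;

/* stand-in for try_to_del_timer_sync(): fails twice, then succeeds */
static int try_op(void)
{
	return (++attempts < 3) ? -1 : 0;
}

static inline void cpu_relax_sketch(void)
{
	__asm__ __volatile__("" ::: "memory");	/* compiler barrier only */
}

int main(void)
{
	for (;;) {
		int ret = try_op();
		if (ret >= 0) {
			printf("succeeded after %d attempts\n", attempts);
			return 0;
		}
		cpu_relax_sketch();	/* give the other CPU a chance */
	}
}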
@@ -383,23 +384,19 @@ EXPORT_SYMBOL(del_timer_sync);
 static int cascade(tvec_base_t *base, tvec_t *tv, int index)
 {
 	/* cascade all the timers from tv up one level */
-	struct list_head *head, *curr;
+	struct timer_list *timer, *tmp;
+	struct list_head tv_list;
+
+	list_replace_init(tv->vec + index, &tv_list);
 
-	head = tv->vec + index;
-	curr = head->next;
 	/*
-	 * We are removing _all_ timers from the list, so we don't have to
-	 * detach them individually, just clear the list afterwards.
+	 * We are removing _all_ timers from the list, so we
+	 * don't have to detach them individually.
 	 */
-	while (curr != head) {
-		struct timer_list *tmp;
-
-		tmp = list_entry(curr, struct timer_list, entry);
-		BUG_ON(tmp->base != base);
-		curr = curr->next;
-		internal_add_timer(base, tmp);
+	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
+		BUG_ON(timer->base != base);
+		internal_add_timer(base, timer);
 	}
-	INIT_LIST_HEAD(head);
 
 	return index;
 }
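The rewritten cascade() detaches a whole bucket with list_replace_init() and rewalks the private copy with list_for_each_entry_safe(), instead of hand-rolling the walk and re-initializing the head afterwards. A self-contained userspace sketch of that idiom, mirroring <linux/list.h> semantics rather than including the kernel headers:

/*
 * list_replace_init(): take over old's entries in O(1), leaving old
 * empty. The assignment ordering below also keeps an empty old safe.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

int main(void)
{
	struct list_head bucket, work, a, b, *p;
	int n = 0;

	INIT_LIST_HEAD(&bucket);
	list_add_tail(&a, &bucket);
	list_add_tail(&b, &bucket);

	list_replace_init(&bucket, &work);	/* bucket is now empty */

	for (p = work.next; p != &work; p = p->next)
		n++;
	printf("moved %d entries, bucket empty: %d\n",
	       n, bucket.next == &bucket);
	return 0;
}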
@@ -419,10 +416,10 @@ static inline void __run_timers(tvec_base_t *base)
 
 	spin_lock_irq(&base->lock);
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
-		struct list_head work_list = LIST_HEAD_INIT(work_list);
+		struct list_head work_list;
 		struct list_head *head = &work_list;
 		int index = base->timer_jiffies & TVR_MASK;
 
 		/*
 		 * Cascade timers:
 		 */
@@ -431,8 +428,8 @@ static inline void __run_timers(tvec_base_t *base)
 			(!cascade(base, &base->tv3, INDEX(1))) &&
 				!cascade(base, &base->tv4, INDEX(2)))
 			cascade(base, &base->tv5, INDEX(3));
 		++base->timer_jiffies;
-		list_splice_init(base->tv1.vec + index, &work_list);
+		list_replace_init(base->tv1.vec + index, &work_list);
 		while (!list_empty(head)) {
 			void (*fn)(unsigned long);
 			unsigned long data;
@@ -601,7 +598,6 @@ long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
 long time_precision = 1;		/* clock precision (us)		*/
 long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
 long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-static long time_phase;			/* phase offset (scaled us)	*/
 long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
 					/* frequency offset (scaled ppm)*/
 static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
@@ -751,27 +747,14 @@ static long adjtime_adjustment(void)
 }
 
 /* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
+static void update_ntp_one_tick(void)
 {
-	long time_adjust_step, delta_nsec;
+	long time_adjust_step;
 
 	time_adjust_step = adjtime_adjustment();
 	if (time_adjust_step)
 		/* Reduce by this step the amount of time left */
 		time_adjust -= time_adjust_step;
-	delta_nsec = tick_nsec + time_adjust_step * 1000;
-	/*
-	 * Advance the phase, once it gets to one microsecond, then
-	 * advance the tick more.
-	 */
-	time_phase += time_adj;
-	if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
-		long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
-		time_phase -= ltemp << (SHIFT_SCALE - 10);
-		delta_nsec += ltemp;
-	}
-	xtime.tv_nsec += delta_nsec;
-	time_interpolator_update(delta_nsec);
 
 	/* Changes by adjtime() do not take effect till next tick. */
 	if (time_next_adjust != 0) {
@@ -784,36 +767,404 @@ static void update_wall_time_one_tick(void)
  * Return how long ticks are at the moment, that is, how much time
  * update_wall_time_one_tick will add to xtime next time we call it
  * (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
- * bits to the right of the binary point.
+ * The return value is in fixed-point nanoseconds shifted by the
+ * specified number of bits to the right of the binary point.
  * This function has no side-effects.
  */
 u64 current_tick_length(void)
 {
 	long delta_nsec;
+	u64 ret;
 
+	/* calculate the finest interval NTP will allow.
+	 * ie: nanosecond value shifted by (SHIFT_SCALE - 10)
+	 */
 	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
-	return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
+	ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
+	ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));
+
+	return ret;
 }
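current_tick_length() now returns the tick length in TICK_LENGTH_SHIFT fixed-point form, folding the NTP time_adj term in at a matching binary point. A userspace sketch of the arithmetic, assuming TICK_LENGTH_SHIFT=32 and SHIFT_SCALE=22 as in 2.6.18-era timex.h; the constants are illustrative only:

/*
 * Fixed-point tick length: whole nanoseconds in the high 32 bits,
 * fractional nanoseconds in the low 32 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ                250
#define TICK_LENGTH_SHIFT 32		/* assumed, as in 2.6.18 timex.h */
#define SHIFT_SCALE       22		/* assumed NTP phase scale */
#define NSEC_PER_SEC      1000000000L

int main(void)
{
	long tick_nsec = (NSEC_PER_SEC + HZ / 2) / HZ;	/* ns per tick */
	long time_adj = 1 << (SHIFT_SCALE - 10);	/* pretend NTP wants +1 ns/tick */
	uint64_t ret;

	/* whole nanoseconds, converted to 32.32 fixed point */
	ret = (uint64_t)tick_nsec << TICK_LENGTH_SHIFT;
	/* time_adj carries SHIFT_SCALE-10 fractional bits; line the
	 * binary points up before adding */
	ret += (int64_t)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));

	printf("tick length: %llu ns (integer part), raw=%llu\n",
	       (unsigned long long)(ret >> TICK_LENGTH_SHIFT),
	       (unsigned long long)ret);
	return 0;
}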
 
-/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
+/* XXX - all of this timekeeping code should be later moved to time.c */
+#include <linux/clocksource.h>
+static struct clocksource *clock; /* pointer to current clocksource */
+
+#ifdef CONFIG_GENERIC_TIME
+/**
+ * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ *
+ * private function, must hold xtime_lock lock when being
+ * called. Returns the number of nanoseconds since the
+ * last call to update_wall_time() (adjusted by NTP scaling)
+ */
+static inline s64 __get_nsec_offset(void)
+{
+	cycle_t cycle_now, cycle_delta;
+	s64 ns_offset;
+
+	/* read clocksource: */
+	cycle_now = clocksource_read(clock);
+
+	/* calculate the delta since the last update_wall_time: */
+	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+	/* convert to nanoseconds: */
+	ns_offset = cyc2ns(clock, cycle_delta);
+
+	return ns_offset;
+}
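__get_nsec_offset() converts a raw cycle delta to nanoseconds through the clocksource's mult/shift pair. A userspace sketch of the cyc2ns() arithmetic, with mult derived the way clocksource_hz2mult() derives it; the 1 MHz counter frequency and the shift value are assumptions for the example:

/*
 * cyc2ns(): ns = (cycles * mult) >> shift, where mult is the
 * nanoseconds-per-cycle ratio kept as a fixed-point value.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 22;
	uint64_t hz = 1000000;			/* assumed 1 MHz counter */
	/* mult = (NSEC_PER_SEC << shift) / hz, as clocksource_hz2mult()
	 * computes it */
	uint32_t mult = (uint32_t)((NSEC_PER_SEC << shift) / hz);
	uint64_t cycle_delta = 2500;		/* cycles since cycle_last */

	uint64_t ns = (cycle_delta * mult) >> shift;

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycle_delta, (unsigned long long)ns);
	return 0;
}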
+
+/**
+ * __get_realtime_clock_ts - Returns the time of day in a timespec
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec. Used by
+ * do_gettimeofday() and get_realtime_clock_ts().
  */
-static void update_wall_time(unsigned long ticks)
+static inline void __get_realtime_clock_ts(struct timespec *ts)
 {
+	unsigned long seq;
+	s64 nsecs;
+
 	do {
-		ticks--;
-		update_wall_time_one_tick();
-		if (xtime.tv_nsec >= 1000000000) {
-			xtime.tv_nsec -= 1000000000;
+		seq = read_seqbegin(&xtime_lock);
+
+		*ts = xtime;
+		nsecs = __get_nsec_offset();
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts, nsecs);
+}
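__get_realtime_clock_ts() uses the lockless seqlock read pattern: snapshot xtime, then retry if a writer raced with the read. A minimal userspace sketch of that pattern with a plain sequence counter standing in for xtime_lock; real readers also need the memory barriers that read_seqbegin()/read_seqretry() provide:

/*
 * Seqlock read side: an even counter means no writer is active; a
 * changed counter means the snapshot may be torn and must be retried.
 */
#include <stdio.h>

static volatile unsigned seq;		/* even = stable, odd = writer active */
static long shared_sec, shared_nsec;	/* stand-ins for xtime */

static void read_time(long *sec, long *nsec)
{
	unsigned start;

	do {
		start = seq;		/* read_seqbegin() */
		*sec  = shared_sec;	/* snapshot the protected data */
		*nsec = shared_nsec;
	} while (start != seq || (start & 1));	/* read_seqretry() */
}

int main(void)
{
	long s, ns;

	shared_sec = 1234; shared_nsec = 567;
	read_time(&s, &ns);
	printf("%ld.%09ld\n", s, ns);
	return 0;
}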
+
+/**
+ * getnstimeofday - Returns the time of day in a timespec
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec.
+ */
+void getnstimeofday(struct timespec *ts)
+{
+	__get_realtime_clock_ts(ts);
+}
+
+EXPORT_SYMBOL(getnstimeofday);
+
+/**
+ * do_gettimeofday - Returns the time of day in a timeval
+ * @tv:		pointer to the timeval to be set
+ *
+ * NOTE: Users should be converted to using get_realtime_clock_ts()
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+	struct timespec now;
+
+	__get_realtime_clock_ts(&now);
+	tv->tv_sec = now.tv_sec;
+	tv->tv_usec = now.tv_nsec/1000;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+/**
+ * do_settimeofday - Sets the time of day
+ * @tv:		pointer to the timespec variable containing the new time
+ *
+ * Sets the time of day to the new time, updates NTP and notifies hrtimers
+ */
+int do_settimeofday(struct timespec *tv)
+{
+	unsigned long flags;
+	time_t wtm_sec, sec = tv->tv_sec;
+	long wtm_nsec, nsec = tv->tv_nsec;
+
+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+		return -EINVAL;
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+
+	nsec -= __get_nsec_offset();
+
+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+	set_normalized_timespec(&xtime, sec, nsec);
+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+	clock->error = 0;
+	ntp_clear();
+
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+
+	/* signal hrtimers about time change */
+	clock_was_set();
+
+	return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/**
+ * change_clocksource - Swaps clocksources if a new one is available
+ *
+ * Accumulates current time interval and initializes new clocksource
+ */
+static int change_clocksource(void)
+{
+	struct clocksource *new;
+	cycle_t now;
+	u64 nsec;
+	new = clocksource_get_next();
+	if (clock != new) {
+		now = clocksource_read(new);
+		nsec = __get_nsec_offset();
+		timespec_add_ns(&xtime, nsec);
+
+		clock = new;
+		clock->cycle_last = now;
+		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
+		       clock->name);
+		return 1;
+	} else if (clock->update_callback) {
+		return clock->update_callback();
+	}
+	return 0;
+}
+#else
+#define change_clocksource() (0)
+#endif
+
+/**
+ * timekeeping_is_continuous - check to see if timekeeping is free running
+ */
+int timekeeping_is_continuous(void)
+{
+	unsigned long seq;
+	int ret;
+
+	do {
+		seq = read_seqbegin(&xtime_lock);
+
+		ret = clock->is_continuous;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	return ret;
+}
+
+/*
+ * timekeeping_init - Initializes the clocksource and common timekeeping values
+ */
+void __init timekeeping_init(void)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+	clock = clocksource_get_next();
+	clocksource_calculate_interval(clock, tick_nsec);
+	clock->cycle_last = clocksource_read(clock);
+	ntp_clear();
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+
+
+static int timekeeping_suspended;
+/*
+ * timekeeping_resume - Resumes the generic timekeeping subsystem.
+ * @dev:	unused
+ *
+ * This is for the generic clocksource timekeeping.
+ * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
+ * still managed by arch specific suspend/resume code.
+ */
+static int timekeeping_resume(struct sys_device *dev)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+	/* restart the last cycle value */
+	clock->cycle_last = clocksource_read(clock);
+	clock->error = 0;
+	timekeeping_suspended = 0;
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+	return 0;
+}
+
+static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+	timekeeping_suspended = 1;
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+	return 0;
+}
+
+/* sysfs resume/suspend bits for timekeeping */
+static struct sysdev_class timekeeping_sysclass = {
+	.resume		= timekeeping_resume,
+	.suspend	= timekeeping_suspend,
+	set_kset_name("timekeeping"),
+};
+
+static struct sys_device device_timer = {
+	.id	= 0,
+	.cls	= &timekeeping_sysclass,
+};
+
+static int __init timekeeping_init_device(void)
+{
+	int error = sysdev_class_register(&timekeeping_sysclass);
+	if (!error)
+		error = sysdev_register(&device_timer);
+	return error;
+}
+
+device_initcall(timekeeping_init_device);
+
+/*
+ * If the error is already larger, we look ahead even further
+ * to compensate for late or lost adjustments.
+ */
+static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
+{
+	s64 tick_error, i;
+	u32 look_ahead, adj;
+	s32 error2, mult;
+
+	/*
+	 * Use the current error value to determine how much to look ahead.
+	 * The larger the error the slower we adjust for it to avoid problems
+	 * with losing too many ticks, otherwise we would overadjust and
+	 * produce an even larger error.  The smaller the adjustment the
+	 * faster we try to adjust for it, as lost ticks can do less harm
+	 * here.  This is tuned so that an error of about 1 msec is adjusted
+	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
+	 */
+	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = abs(error2);
+	for (look_ahead = 0; error2 > 0; look_ahead++)
+		error2 >>= 2;
+
+	/*
+	 * Now calculate the error in (1 << look_ahead) ticks, but first
+	 * remove the single look ahead already included in the error.
+	 */
+	tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+	tick_error -= clock->xtime_interval >> 1;
+	error = ((error - tick_error) >> look_ahead) + tick_error;
+
+	/* Finally calculate the adjustment shift value. */
+	i = *interval;
+	mult = 1;
+	if (error < 0) {
+		error = -error;
+		*interval = -*interval;
+		*offset = -*offset;
+		mult = -1;
+	}
+	for (adj = 0; error > i; adj++)
+		error >>= 1;
+
+	*interval <<= adj;
+	*offset <<= adj;
+	return mult << adj;
+}
+
+/*
+ * Adjust the multiplier to reduce the error value,
+ * this is optimized for the most common adjustments of -1,0,1,
+ * for other values we can do a bit more work.
+ */
+static void clocksource_adjust(struct clocksource *clock, s64 offset)
+{
+	s64 error, interval = clock->cycle_interval;
+	int adj;
+
+	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+	if (error > interval) {
+		error >>= 2;
+		if (likely(error <= interval))
+			adj = 1;
+		else
+			adj = clocksource_bigadjust(error, &interval, &offset);
+	} else if (error < -interval) {
+		error >>= 2;
+		if (likely(error >= -interval)) {
+			adj = -1;
+			interval = -interval;
+			offset = -offset;
+		} else
+			adj = clocksource_bigadjust(error, &interval, &offset);
+	} else
+		return;
+
+	clock->mult += adj;
+	clock->xtime_interval += interval;
+	clock->xtime_nsec -= offset;
+	clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
+}
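clocksource_adjust() steers the clock by nudging the multiplier: a +/-1 change in mult alters the shifted nanoseconds accumulated per cycle_interval, which in turn feeds back into clock->error. A userspace sketch of that arithmetic; the shift, interval and mult values are assumptions for the example:

/*
 * Because xtime_interval is kept left-shifted by 'shift', the
 * sub-nanosecond effect of a +1 mult bump accumulates instead of
 * being truncated away each interval.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t shift = 22;
	uint64_t cycle_interval = 4000000;	/* cycles per tick (assumed) */
	uint32_t mult = 1 << 22;		/* ~1 ns per cycle, shifted (assumed) */

	uint64_t xtime_interval = cycle_interval * (uint64_t)mult;
	uint64_t bumped = cycle_interval * (uint64_t)(mult + 1);

	/* over 1000 intervals a +1 bump accumulates this many extra ns */
	uint64_t gain_ns = ((bumped - xtime_interval) * 1000) >> shift;

	printf("+1 on mult gains ~%llu ns per 1000 intervals\n",
	       (unsigned long long)gain_ns);
	return 0;
}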
+
+/*
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ * Called from the timer interrupt, must hold a write on xtime_lock.
+ */
+static void update_wall_time(void)
+{
+	cycle_t offset;
+
+	/* Make sure we're fully resumed: */
+	if (unlikely(timekeeping_suspended))
+		return;
+
+#ifdef CONFIG_GENERIC_TIME
+	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
+#else
+	offset = clock->cycle_interval;
+#endif
+	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+
+	/* normally this loop will run just once, however in the
+	 * case of lost or late ticks, it will accumulate correctly.
+	 */
+	while (offset >= clock->cycle_interval) {
+		/* accumulate one interval */
+		clock->xtime_nsec += clock->xtime_interval;
+		clock->cycle_last += clock->cycle_interval;
+		offset -= clock->cycle_interval;
+
+		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
+			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 			xtime.tv_sec++;
 			second_overflow();
 		}
-	} while (ticks);
+
+		/* interpolator bits */
+		time_interpolator_update(clock->xtime_interval
+						>> clock->shift);
+		/* increment the NTP state machine */
+		update_ntp_one_tick();
+
+		/* accumulate error between NTP and clock interval */
+		clock->error += current_tick_length();
+		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+	}
+
+	/* correct the clock when NTP error is too big */
+	clocksource_adjust(clock, offset);
+
+	/* store full nanoseconds into xtime */
+	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+
+	/* check to see if there is a new clocksource to use */
+	if (change_clocksource()) {
+		clock->error = 0;
+		clock->xtime_nsec = 0;
+		clocksource_calculate_interval(clock, tick_nsec);
+	}
 }
 
 /*
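The new update_wall_time() accumulates whole cycle_intervals, keeping xtime_nsec left-shifted by clock->shift so sub-nanosecond remainders survive between iterations instead of being lost each tick. A userspace sketch of that accumulation loop under assumed constants:

/*
 * Shifted-nanosecond accumulation: add one (shifted) interval per
 * elapsed cycle_interval, rolling seconds over as the accumulator
 * crosses NSEC_PER_SEC << shift.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint32_t shift = 22;
	uint64_t cycle_interval = 4000000;	  /* cycles per tick (assumed) */
	uint64_t xtime_interval = cycle_interval * ((1ULL << shift) + 3);
						  /* ns per tick << shift, slightly fast */
	uint64_t xtime_nsec = 0;		  /* ns << shift accumulator */
	uint64_t offset = 12 * cycle_interval;	  /* pretend 12 ticks elapsed */
	unsigned long tv_sec = 0;

	while (offset >= cycle_interval) {
		xtime_nsec += xtime_interval;	  /* accumulate one interval */
		offset -= cycle_interval;
		if (xtime_nsec >= (NSEC_PER_SEC << shift)) {
			xtime_nsec -= NSEC_PER_SEC << shift;
			tv_sec++;		  /* second rollover */
		}
	}
	printf("%lu s + %llu ns accumulated\n", tv_sec,
	       (unsigned long long)(xtime_nsec >> shift));
	return 0;
}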
@@ -884,7 +1235,7 @@ unsigned long wall_jiffies = INITIAL_JIFFIES;
  * playing with xtime and avenrun.
  */
 #ifndef ARCH_HAVE_XTIME_LOCK
-seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
 
 EXPORT_SYMBOL(xtime_lock);
 #endif
@@ -919,10 +1270,8 @@ static inline void update_times(void)
 	unsigned long ticks;
 
 	ticks = jiffies - wall_jiffies;
-	if (ticks) {
-		wall_jiffies += ticks;
-		update_wall_time(ticks);
-	}
+	wall_jiffies += ticks;
+	update_wall_time();
 	calc_load(ticks);
 }
 
@@ -1046,7 +1395,7 @@ asmlinkage long sys_getegid(void)
 
 static void process_timeout(unsigned long __data)
 {
-	wake_up_process((task_t *)__data);
+	wake_up_process((struct task_struct *)__data);
 }
 
 /**
@@ -1237,6 +1586,13 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
 	return 0;
 }
 
+/*
+ * lockdep: we want to track each per-CPU base as a separate lock-class,
+ * but timer-bases are kmalloc()-ed, so we need to attach separate
+ * keys to them:
+ */
+static struct lock_class_key base_lock_keys[NR_CPUS];
+
 static int __devinit init_timers_cpu(int cpu)
 {
 	int j;
@@ -1272,6 +1628,8 @@ static int __devinit init_timers_cpu(int cpu)
 	}
 
 	spin_lock_init(&base->lock);
+	lockdep_set_class(&base->lock, base_lock_keys + cpu);
+
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
 		INIT_LIST_HEAD(base->tv4.vec + j);
@@ -1330,7 +1688,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int timer_cpu_notify(struct notifier_block *self,
+static int __devinit timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1350,7 +1708,7 @@ static int timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block timers_nb = {
+static struct notifier_block __devinitdata timers_nb = {
 	.notifier_call	= timer_cpu_notify,
 };
 