Diffstat (limited to 'kernel/timer.c')
 -rw-r--r--  kernel/timer.c | 148
 1 file changed, 143 insertions(+), 5 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index c1c7fbcffec1..0256ab443d8a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -80,6 +80,138 @@ tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
 static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
 
+/**
+ * __round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the "j" parameter.
+ */
+unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+	int rem;
+	unsigned long original = j;
+
+	/*
+	 * We don't want all cpus firing their timers at once hitting the
+	 * same lock or cachelines, so we skew each extra cpu with an extra
+	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
+	 * already did this.
+	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
+	 * this extra offset again.
+	 */
+	j += cpu * 3;
+
+	rem = j % HZ;
+
+	/*
+	 * If the target jiffy is just after a whole second (which can happen
+	 * due to delays of the timer irq, long irq off times etc etc) then
+	 * we should round down to the whole second, not up. Use 1/4th second
+	 * as cutoff for this rounding as an extreme upper bound for this.
+	 */
+	if (rem < HZ/4) /* round down */
+		j = j - rem;
+	else /* round up */
+		j = j - rem + HZ;
+
+	/* now that we have rounded, subtract the extra skew again */
+	j -= cpu * 3;
+
+	if (j <= jiffies) /* rounding ate our timeout entirely; */
+		return original;
+	return j;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies);
+
+/**
+ * __round_jiffies_relative - function to round jiffies to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the "j" parameter.
+ */
+unsigned long __round_jiffies_relative(unsigned long j, int cpu)
+{
+	/*
+	 * In theory the following code can skip a jiffy in case jiffies
+	 * increments right between the addition and the later subtraction.
+	 * However since the entire point of this function is to use approximate
+	 * timeouts, it's entirely ok to not handle that.
+	 */
+	return __round_jiffies(j + jiffies, cpu) - jiffies;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_relative);
+
+/**
+ * round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * round_jiffies rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The return value is the rounded version of the "j" parameter.
+ */
+unsigned long round_jiffies(unsigned long j)
+{
+	return __round_jiffies(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL_GPL(round_jiffies);
+
+/**
+ * round_jiffies_relative - function to round jiffies to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ *
+ * round_jiffies_relative rounds a time delta in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The return value is the rounded version of the "j" parameter.
+ */
+unsigned long round_jiffies_relative(unsigned long j)
+{
+	return __round_jiffies_relative(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL_GPL(round_jiffies_relative);
+
+
 static inline void set_running_timer(tvec_base_t *base,
 			struct timer_list *timer)
 {
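The effect of the skew is easiest to see with numbers. Assuming HZ=1000, a timer on CPU 1 aimed at j = 5070 becomes 5073 after the skew; the remainder 73 is below HZ/4, so it rounds down to 5000, and subtracting the skew gives 4997. The same target on CPU 2 yields 4994: both CPUs wake near the same second boundary, but 3 jiffies apart, so they avoid hitting the same locks in the same jiffy. A minimal usage sketch against the timer API of this era (the timer name and callback are hypothetical, not part of this patch):

static struct timer_list housekeeping_timer;	/* hypothetical example timer */

/* periodic work that only needs about one-second accuracy */
static void housekeeping_fn(unsigned long data)
{
	/* ... do the work ... */

	/* re-arm roughly one second out, aligned to a full second so it
	 * coalesces with other rounded timers and lets the CPU idle longer */
	mod_timer(&housekeeping_timer, round_jiffies(jiffies + HZ));
}

static void housekeeping_start(void)
{
	init_timer(&housekeeping_timer);
	housekeeping_timer.function = housekeeping_fn;
	housekeeping_timer.data = 0;
	mod_timer(&housekeeping_timer, round_jiffies(jiffies + HZ));
}

Note that round_jiffies() samples raw_smp_processor_id(), so the skew is computed for whichever CPU the caller happens to run on; if the timer ends up firing elsewhere, nothing breaks, since by the functions' own contract the exact firing time is only approximate and the skew is a contention-avoidance hint.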
@@ -714,7 +846,7 @@ static int change_clocksource(void)
 		clock = new;
 		clock->cycle_last = now;
 		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
-					clock->name);
+		       clock->name);
 		return 1;
 	} else if (clock->update_callback) {
 		return clock->update_callback();
@@ -722,7 +854,10 @@ static int change_clocksource(void)
 	return 0;
 }
 #else
-#define change_clocksource() (0)
+static inline int change_clocksource(void)
+{
+	return 0;
+}
 #endif
 
 /**
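The #else branch above trades a function-like macro for a static inline with identical behavior. The patch gives no rationale, but the usual one applies: an inline is a real function with a prototype, so the compiler type-checks call sites and the name can be used anywhere a function is expected. A freestanding illustration (both identifiers are invented for the example):

/* old style: pure textual substitution, no prototype */
#define change_clocksource_macro() (0)

/* new style: an empty function the compiler can check */
static inline int change_clocksource_inline(void)
{
	return 0;
}

static void demo(void)
{
	int (*fn)(void) = change_clocksource_inline;	/* compiles */
	/* int (*bad)(void) = change_clocksource_macro;
	 * would not compile: the macro only exists when invoked as
	 * change_clocksource_macro() */
	(void)fn;
}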
@@ -820,7 +955,8 @@ device_initcall(timekeeping_init_device);
  * If the error is already larger, we look ahead even further
  * to compensate for late or lost adjustments.
  */
-static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
+static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
+						 s64 *offset)
 {
 	s64 tick_error, i;
 	u32 look_ahead, adj;
@@ -844,7 +980,8 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+	tick_error = current_tick_length() >>
+		(TICK_LENGTH_SHIFT - clock->shift + 1);
 	tick_error -= clock->xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;
 
@@ -896,7 +1033,8 @@ static void clocksource_adjust(struct clocksource *clock, s64 offset)
 	clock->mult += adj;
 	clock->xtime_interval += interval;
 	clock->xtime_nsec -= offset;
-	clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
+	clock->error -= (interval - offset) <<
+			(TICK_LENGTH_SHIFT - clock->shift);
 }
 
 /**
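The two expressions re-wrapped in the last hunks are fixed-point scale conversions: a quantity kept at a 2^TICK_LENGTH_SHIFT scale is moved to the clocksource's 2^clock->shift scale by shifting right by the difference of the two shifts, with the extra "+ 1" in clocksource_bigadjust() additionally halving the result. A standalone sketch of just that arithmetic, runnable in userspace (the shift values are examples, not the kernel's actual configuration):

#include <stdint.h>
#include <stdio.h>

#define TICK_LENGTH_SHIFT 32		/* example scale factor */

int main(void)
{
	uint32_t clock_shift = 22;	/* hypothetical clocksource shift */

	/* 1 ms carried at nanoseconds << TICK_LENGTH_SHIFT resolution */
	int64_t tick_length = 1000000LL << TICK_LENGTH_SHIFT;

	/* convert to the clocksource's resolution and halve:
	 * shift right by (TICK_LENGTH_SHIFT - clock_shift + 1) */
	int64_t tick_error = tick_length >> (TICK_LENGTH_SHIFT - clock_shift + 1);

	/* sanity check against the direct computation: (1 ms << 22) / 2 */
	printf("%lld == %lld\n", (long long)tick_error,
	       (long long)((1000000LL << clock_shift) / 2));
	return 0;
}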