Diffstat (limited to 'kernel/timer.c')
 -rw-r--r--  kernel/timer.c | 149
 1 files changed, 110 insertions, 39 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index 03bc7f1f1593..566257d1dc10 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -112,27 +112,8 @@ timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
 			tbase_get_deferrable(timer->base));
 }
 
-/**
- * __round_jiffies - function to round jiffies to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * __round_jiffies() rounds an absolute time in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The exact rounding is skewed for each processor to avoid all
- * processors firing at the exact same time, which could lead
- * to lock contention or spurious cache line bouncing.
- *
- * The return value is the rounded version of the @j parameter.
- */
-unsigned long __round_jiffies(unsigned long j, int cpu)
+static unsigned long round_jiffies_common(unsigned long j, int cpu,
+		bool force_up)
 {
 	int rem;
 	unsigned long original = j;
@@ -154,8 +135,9 @@ unsigned long __round_jiffies(unsigned long j, int cpu)
 	 * due to delays of the timer irq, long irq off times etc etc) then
 	 * we should round down to the whole second, not up. Use 1/4th second
 	 * as cutoff for this rounding as an extreme upper bound for this.
+	 * But never round down if @force_up is set.
 	 */
-	if (rem < HZ/4) /* round down */
+	if (rem < HZ/4 && !force_up) /* round down */
 		j = j - rem;
 	else /* round up */
 		j = j - rem + HZ;
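The cutoff above is the entire rounding policy: a timeout less than a quarter second past a second boundary rounds down, everything else rounds up, and force_up simply disables the round-down branch. A standalone sketch of just this rule, assuming HZ=1000 for illustration (the real round_jiffies_common() also applies the per-CPU skew described in the docbook comment and refuses to return a time already in the past, which this sketch omits):

/* Standalone illustration of the round-down/round-up cutoff. */
#include <stdbool.h>
#include <stdio.h>

#define HZ 1000	/* assumption for the example */

static unsigned long round_common(unsigned long j, bool force_up)
{
	unsigned long rem = j % HZ;

	/* Less than 1/4 second past the boundary: round down... */
	if (rem < HZ / 4 && !force_up)
		return j - rem;
	/* ...otherwise, or when forced, round up to the next second. */
	return j - rem + HZ;
}

int main(void)
{
	printf("%lu\n", round_common(5100, false));	/* rem 100 < 250: 5000 */
	printf("%lu\n", round_common(5100, true));	/* force_up:      6000 */
	printf("%lu\n", round_common(5600, false));	/* rem 600 >= 250: 6000 */
	return 0;
}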
@@ -167,6 +149,31 @@ unsigned long __round_jiffies(unsigned long j, int cpu)
 		return original;
 	return j;
 }
+
+/**
+ * __round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+	return round_jiffies_common(j, cpu, false);
+}
 EXPORT_SYMBOL_GPL(__round_jiffies);
 
 /**
@@ -191,13 +198,10 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  */
 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 {
-	/*
-	 * In theory the following code can skip a jiffy in case jiffies
-	 * increments right between the addition and the later subtraction.
-	 * However since the entire point of this function is to use approximate
-	 * timeouts, it's entirely ok to not handle that.
-	 */
-	return __round_jiffies(j + jiffies, cpu) - jiffies;
+	unsigned long j0 = jiffies;
+
+	/* Use j0 because jiffies might change while we run */
+	return round_jiffies_common(j + j0, cpu, false) - j0;
 }
 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
 
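Snapshotting jiffies into j0 closes a small hole the old comment shrugged off: if the tick advances jiffies between the j + jiffies addition and the final subtraction, the returned relative timeout comes out one jiffy short. That was tolerable for approximate timers, but a short result can fire before the rounded deadline, which the never-round-down variants added below cannot allow. A sketch of the hazard; round_up_to_second() is a hypothetical stand-in for the rounding helper:

/* "jiffies" modeled as a counter that the tick may bump at any point. */
extern volatile unsigned long jiffies;

/* Hypothetical stand-in for the kernel's rounding helper. */
extern unsigned long round_up_to_second(unsigned long j);

unsigned long racy_relative(unsigned long j)
{
	unsigned long expires = round_up_to_second(j + jiffies);

	/* If the tick fires here, the second read of jiffies is larger
	 * and the returned delta shrinks by one jiffy. */
	return expires - jiffies;
}

unsigned long safe_relative(unsigned long j)
{
	unsigned long j0 = jiffies;	/* read once, use for both steps */

	return round_up_to_second(j + j0) - j0;
}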
@@ -218,7 +222,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  */
 unsigned long round_jiffies(unsigned long j)
 {
-	return __round_jiffies(j, raw_smp_processor_id());
+	return round_jiffies_common(j, raw_smp_processor_id(), false);
 }
 EXPORT_SYMBOL_GPL(round_jiffies);
 
@@ -243,6 +247,71 @@ unsigned long round_jiffies_relative(unsigned long j)
 }
 EXPORT_SYMBOL_GPL(round_jiffies_relative);
 
+/**
+ * __round_jiffies_up - function to round jiffies up to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * This is the same as __round_jiffies() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long __round_jiffies_up(unsigned long j, int cpu)
+{
+	return round_jiffies_common(j, cpu, true);
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up);
+
+/**
+ * __round_jiffies_up_relative - function to round jiffies up to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * This is the same as __round_jiffies_relative() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
+{
+	unsigned long j0 = jiffies;
+
+	/* Use j0 because jiffies might change while we run */
+	return round_jiffies_common(j + j0, cpu, true) - j0;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
+
+/**
+ * round_jiffies_up - function to round jiffies up to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * This is the same as round_jiffies() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long round_jiffies_up(unsigned long j)
+{
+	return round_jiffies_common(j, raw_smp_processor_id(), true);
+}
+EXPORT_SYMBOL_GPL(round_jiffies_up);
+
+/**
+ * round_jiffies_up_relative - function to round jiffies up to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ *
+ * This is the same as round_jiffies_relative() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long round_jiffies_up_relative(unsigned long j)
+{
+	return __round_jiffies_up_relative(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
+
 
 static inline void set_running_timer(struct tvec_base *base,
 					struct timer_list *timer)
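The _up family targets timeouts that benefit from batching onto second boundaries but must never fire early. A hypothetical caller sketch; the watchdog_timer name and the 2*HZ deadline are illustrative, not part of this patch:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list watchdog_timer;	/* hypothetical example timer */

static void watchdog_rearm(void)
{
	/*
	 * Batch the expiry onto a full-second boundary to save wakeups;
	 * round_jiffies_up() guarantees it is never pulled earlier than
	 * the requested jiffies + 2*HZ.
	 */
	mod_timer(&watchdog_timer, round_jiffies_up(jiffies + 2 * HZ));
}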
@@ -978,6 +1047,7 @@ void update_process_times(int user_tick)
 	run_local_timers();
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_tick);
+	printk_tick();
 	scheduler_tick();
 	run_posix_cpu_timers(p);
 }
@@ -1122,25 +1192,25 @@ asmlinkage long sys_getppid(void)
 asmlinkage long sys_getuid(void)
 {
 	/* Only we change this so SMP safe */
-	return current->uid;
+	return current_uid();
 }
 
 asmlinkage long sys_geteuid(void)
 {
 	/* Only we change this so SMP safe */
-	return current->euid;
+	return current_euid();
 }
 
 asmlinkage long sys_getgid(void)
 {
 	/* Only we change this so SMP safe */
-	return current->gid;
+	return current_gid();
 }
 
 asmlinkage long sys_getegid(void)
 {
 	/* Only we change this so SMP safe */
-	return current->egid;
+	return current_egid();
 }
 
 #endif
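The sys_get*id() hunks are fallout from the credentials rework: the uid/gid fields moved off task_struct into a struct cred reached through the task, and current_uid() and friends are accessors over that. Roughly their shape, as a simplified sketch rather than the verbatim definitions in include/linux/cred.h:

/* Sketch only; simplified from the accessors in include/linux/cred.h. */
#define current_cred()	(current->cred)

#define current_uid()	(current_cred()->uid)
#define current_euid()	(current_cred()->euid)
#define current_gid()	(current_cred()->gid)
#define current_egid()	(current_cred()->egid)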
@@ -1435,9 +1505,11 @@ static void __cpuinit migrate_timers(int cpu)
 	BUG_ON(cpu_online(cpu));
 	old_base = per_cpu(tvec_bases, cpu);
 	new_base = get_cpu_var(tvec_bases);
-
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	BUG_ON(old_base->running_timer);
@@ -1452,8 +1524,7 @@ static void __cpuinit migrate_timers(int cpu)
 	}
 
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(tvec_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
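The migrate_timers() locking change is behavior-preserving: spin_lock_irq() is the combined form of disabling local interrupts and taking the lock, and spin_unlock_irq() undoes both, re-enabling interrupts unconditionally, which is safe here because this path runs with interrupts enabled. The new comment records why taking two base locks is deadlock-free: CPU hotplug serializes callers, so no two contexts ever hold the locks in opposite order. The two equivalent patterns side by side, with a hypothetical demo_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock for illustration */

static void demo_open_coded(void)	/* what the old code did */
{
	local_irq_disable();
	spin_lock(&demo_lock);
	/* ... critical section ... */
	spin_unlock(&demo_lock);
	local_irq_enable();
}

static void demo_combined(void)	/* equivalent, one call each way */
{
	spin_lock_irq(&demo_lock);
	/* ... critical section ... */
	spin_unlock_irq(&demo_lock);
}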