Diffstat (limited to 'kernel/timer.c')
 -rw-r--r--   kernel/timer.c   95
 1 file changed, 39 insertions(+), 56 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index ab189dd187cb..c3a874f1393c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(jiffies_64);
 /*
  * per-CPU timer vector definitions:
  */
-
 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
 #define TVN_SIZE (1 << TVN_BITS)
@@ -62,11 +61,6 @@ EXPORT_SYMBOL(jiffies_64);
 #define TVN_MASK (TVN_SIZE - 1)
 #define TVR_MASK (TVR_SIZE - 1)
 
-struct timer_base_s {
-	spinlock_t lock;
-	struct timer_list *running_timer;
-};
-
 typedef struct tvec_s {
 	struct list_head vec[TVN_SIZE];
 } tvec_t;
@@ -76,7 +70,8 @@ typedef struct tvec_root_s {
 } tvec_root_t;
 
 struct tvec_t_base_s {
-	struct timer_base_s t_base;
+	spinlock_t lock;
+	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
 	tvec_root_t tv1;
 	tvec_t tv2;
@@ -87,13 +82,14 @@ struct tvec_t_base_s {
 
 typedef struct tvec_t_base_s tvec_base_t;
 static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
-static tvec_base_t boot_tvec_bases;
+tvec_base_t boot_tvec_bases;
+EXPORT_SYMBOL(boot_tvec_bases);
 
 static inline void set_running_timer(tvec_base_t *base,
 					struct timer_list *timer)
 {
 #ifdef CONFIG_SMP
-	base->t_base.running_timer = timer;
+	base->running_timer = timer;
 #endif
 }
 
@@ -139,15 +135,6 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 	list_add_tail(&timer->entry, vec);
 }
 
-typedef struct timer_base_s timer_base_t;
-/*
- * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
- * at compile time, and we need timer->base to lock the timer.
- */
-timer_base_t __init_timer_base
-	____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
-EXPORT_SYMBOL(__init_timer_base);
-
 /***
  * init_timer - initialize a timer.
  * @timer: the timer to be initialized
@@ -158,7 +145,7 @@ EXPORT_SYMBOL(__init_timer_base);
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base;
+	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
 }
 EXPORT_SYMBOL(init_timer);
 
@@ -174,7 +161,7 @@ static inline void detach_timer(struct timer_list *timer,
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
+ * We are using hashed locking: holding per_cpu(tvec_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -185,10 +172,10 @@ static inline void detach_timer(struct timer_list *timer,
  * possible to set timer->base = NULL and drop the lock: the timer remains
  * locked.
  */
-static timer_base_t *lock_timer_base(struct timer_list *timer,
+static tvec_base_t *lock_timer_base(struct timer_list *timer,
 					unsigned long *flags)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 
 	for (;;) {
 		base = timer->base;
@@ -205,8 +192,7 @@ static timer_base_t *lock_timer_base(struct timer_list *timer,
 
 int __mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	timer_base_t *base;
-	tvec_base_t *new_base;
+	tvec_base_t *base, *new_base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -221,7 +207,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	new_base = __get_cpu_var(tvec_bases);
 
-	if (base != &new_base->t_base) {
+	if (base != new_base) {
 		/*
 		 * We are trying to schedule the timer on the local CPU.
 		 * However we can't change timer's base while it is running,
@@ -229,21 +215,19 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 		 * handler yet has not finished. This also guarantees that
 		 * the timer is serialized wrt itself.
 		 */
-		if (unlikely(base->running_timer == timer)) {
-			/* The timer remains on a former base */
-			new_base = container_of(base, tvec_base_t, t_base);
-		} else {
+		if (likely(base->running_timer != timer)) {
 			/* See the comment in lock_timer_base() */
 			timer->base = NULL;
 			spin_unlock(&base->lock);
-			spin_lock(&new_base->t_base.lock);
-			timer->base = &new_base->t_base;
+			base = new_base;
+			spin_lock(&base->lock);
+			timer->base = base;
 		}
 	}
 
 	timer->expires = expires;
-	internal_add_timer(new_base, timer);
-	spin_unlock_irqrestore(&new_base->t_base.lock, flags);
+	internal_add_timer(base, timer);
+	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
@@ -263,10 +247,10 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	unsigned long flags;
 
 	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->t_base.lock, flags);
-	timer->base = &base->t_base;
+	spin_lock_irqsave(&base->lock, flags);
+	timer->base = base;
 	internal_add_timer(base, timer);
-	spin_unlock_irqrestore(&base->t_base.lock, flags);
+	spin_unlock_irqrestore(&base->lock, flags);
 }
 
 
@@ -319,7 +303,7 @@ EXPORT_SYMBOL(mod_timer);
  */
 int del_timer(struct timer_list *timer)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -346,7 +330,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 	unsigned long flags;
 	int ret = -1;
 
@@ -410,7 +394,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
 		struct timer_list *tmp;
 
 		tmp = list_entry(curr, struct timer_list, entry);
-		BUG_ON(tmp->base != &base->t_base);
+		BUG_ON(tmp->base != base);
 		curr = curr->next;
 		internal_add_timer(base, tmp);
 	}
@@ -432,7 +416,7 @@ static inline void __run_timers(tvec_base_t *base)
 {
 	struct timer_list *timer;
 
-	spin_lock_irq(&base->t_base.lock);
+	spin_lock_irq(&base->lock);
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
 		struct list_head work_list = LIST_HEAD_INIT(work_list);
 		struct list_head *head = &work_list;
@@ -458,7 +442,7 @@ static inline void __run_timers(tvec_base_t *base)
 
 			set_running_timer(base, timer);
 			detach_timer(timer, 1);
-			spin_unlock_irq(&base->t_base.lock);
+			spin_unlock_irq(&base->lock);
 			{
 				int preempt_count = preempt_count();
 				fn(data);
@@ -471,11 +455,11 @@ static inline void __run_timers(tvec_base_t *base)
 					BUG();
 				}
 			}
-			spin_lock_irq(&base->t_base.lock);
+			spin_lock_irq(&base->lock);
 		}
 	}
 	set_running_timer(base, NULL);
-	spin_unlock_irq(&base->t_base.lock);
+	spin_unlock_irq(&base->lock);
 }
 
 #ifdef CONFIG_NO_IDLE_HZ
@@ -506,7 +490,7 @@ unsigned long next_timer_interrupt(void)
 	hr_expires += jiffies;
 
 	base = __get_cpu_var(tvec_bases);
-	spin_lock(&base->t_base.lock);
+	spin_lock(&base->lock);
 	expires = base->timer_jiffies + (LONG_MAX >> 1);
 	list = NULL;
 
@@ -554,7 +538,7 @@ found:
 				expires = nte->expires;
 		}
 	}
-	spin_unlock(&base->t_base.lock);
+	spin_unlock(&base->lock);
 
 	if (time_before(hr_expires, expires))
 		return hr_expires;
@@ -841,7 +825,7 @@ void update_process_times(int user_tick)
  */
 static unsigned long count_active_tasks(void)
 {
-	return (nr_running() + nr_uninterruptible()) * FIXED_1;
+	return nr_active() * FIXED_1;
 }
 
 /*
@@ -1262,7 +1246,7 @@ static int __devinit init_timers_cpu(int cpu)
 		}
 		per_cpu(tvec_bases, cpu) = base;
 	}
-	spin_lock_init(&base->t_base.lock);
+	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
 		INIT_LIST_HEAD(base->tv4.vec + j);
@@ -1284,7 +1268,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
 	while (!list_empty(head)) {
 		timer = list_entry(head->next, struct timer_list, entry);
 		detach_timer(timer, 0);
-		timer->base = &new_base->t_base;
+		timer->base = new_base;
 		internal_add_timer(new_base, timer);
 	}
 }
@@ -1300,11 +1284,11 @@ static void __devinit migrate_timers(int cpu)
 	new_base = get_cpu_var(tvec_bases);
 
 	local_irq_disable();
-	spin_lock(&new_base->t_base.lock);
-	spin_lock(&old_base->t_base.lock);
+	spin_lock(&new_base->lock);
+	spin_lock(&old_base->lock);
+
+	BUG_ON(old_base->running_timer);
 
-	if (old_base->t_base.running_timer)
-		BUG();
 	for (i = 0; i < TVR_SIZE; i++)
 		migrate_timer_list(new_base, old_base->tv1.vec + i);
 	for (i = 0; i < TVN_SIZE; i++) {
@@ -1314,8 +1298,8 @@ static void __devinit migrate_timers(int cpu)
 		migrate_timer_list(new_base, old_base->tv5.vec + i);
 	}
 
-	spin_unlock(&old_base->t_base.lock);
-	spin_unlock(&new_base->t_base.lock);
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 	local_irq_enable();
 	put_cpu_var(tvec_bases);
 }
@@ -1495,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
 	unsigned long flags;
 
 	/* Sanity check */
-	if (ti->frequency == 0 || ti->mask == 0)
-		BUG();
+	BUG_ON(ti->frequency == 0 || ti->mask == 0);
 
 	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
 	spin_lock(&time_interpolator_lock);