path: root/kernel/timer.c
author     Jeff Garzik <jeff@garzik.org>  2006-04-12 16:54:16 -0400
committer  Jeff Garzik <jeff@garzik.org>  2006-04-12 16:54:16 -0400
commit     875999c5539999f61a45620aae0c3e5fb1d2b035 (patch)
tree       4535032a8a10f5782c0aef6a620b1a624ea9f863 /kernel/timer.c
parent     79072f38909e3d9883317238887460c39ddcc4cb (diff)
parent     26ec634c31a11a003040e10b4d650495158632fd (diff)
Merge branch 'upstream'
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--	kernel/timer.c	126
1 file changed, 59 insertions(+), 67 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index ab189dd187cb..883773788836 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -54,7 +54,6 @@ EXPORT_SYMBOL(jiffies_64);
 /*
  * per-CPU timer vector definitions:
  */
-
 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
 #define TVN_SIZE (1 << TVN_BITS)
@@ -62,11 +61,6 @@ EXPORT_SYMBOL(jiffies_64);
 #define TVN_MASK (TVN_SIZE - 1)
 #define TVR_MASK (TVR_SIZE - 1)
 
-struct timer_base_s {
-	spinlock_t lock;
-	struct timer_list *running_timer;
-};
-
 typedef struct tvec_s {
 	struct list_head vec[TVN_SIZE];
 } tvec_t;
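
[An aside on the wheel geometry these constants define, not part of the patch: with CONFIG_BASE_SMALL disabled, tv1 is a 256-slot root vector indexed directly by the low 8 bits of the expiry, and tv2..tv5 are 64-slot overflow vectors indexed by successive 6-bit fields. A minimal userspace sketch of the bucketing, mirroring the existing internal_add_timer() logic in this file; pick_slot() and its arguments are hypothetical names:]

#include <stdio.h>

#define TVN_BITS 6                 /* CONFIG_BASE_SMALL=0 sizes */
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)   /* 64 slots per overflow wheel */
#define TVR_SIZE (1 << TVR_BITS)   /* 256 slots in the root wheel */
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

/* Which wheel level and slot would a timer land in? */
static void pick_slot(unsigned long expires, unsigned long timer_jiffies)
{
	unsigned long idx = expires - timer_jiffies;

	if (idx < TVR_SIZE)
		printf("tv1 slot %lu\n", expires & TVR_MASK);
	else if (idx < 1UL << (TVR_BITS + TVN_BITS))
		printf("tv2 slot %lu\n", (expires >> TVR_BITS) & TVN_MASK);
	else if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS))
		printf("tv3 slot %lu\n",
		       (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK);
	else
		printf("tv4/tv5: coarser still\n");
}

int main(void)
{
	pick_slot(100, 0);	/* tv1 slot 100: due within 256 jiffies */
	pick_slot(5000, 0);	/* tv2 slot 19: between 2^8 and 2^14 out */
	return 0;
}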
@@ -76,7 +70,8 @@ typedef struct tvec_root_s {
 } tvec_root_t;
 
 struct tvec_t_base_s {
-	struct timer_base_s t_base;
+	spinlock_t lock;
+	struct timer_list *running_timer;
 	unsigned long timer_jiffies;
 	tvec_root_t tv1;
 	tvec_t tv2;
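
[For reference, the merged per-CPU base after this hunk reads approximately as follows, reassembled from the surrounding context; tv3..tv5 are elided by the diff but follow the same pattern as tv2:]

struct tvec_t_base_s {
	spinlock_t lock;			/* protects all timers on this base */
	struct timer_list *running_timer;	/* handler currently executing (SMP only) */
	unsigned long timer_jiffies;		/* last jiffy the wheel was advanced to */
	tvec_root_t tv1;			/* 256-slot root wheel */
	tvec_t tv2;				/* 64-slot overflow wheels... */
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;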
@@ -86,14 +81,16 @@ struct tvec_t_base_s {
 } ____cacheline_aligned_in_smp;
 
 typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
-static tvec_base_t boot_tvec_bases;
+
+tvec_base_t boot_tvec_bases;
+EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
 
 static inline void set_running_timer(tvec_base_t *base,
 					struct timer_list *timer)
 {
 #ifdef CONFIG_SMP
-	base->t_base.running_timer = timer;
+	base->running_timer = timer;
 #endif
 }
 
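
[boot_tvec_bases is exported because, with __init_timer_base deleted in the next hunk, statically-initialised timers need a base they can name at compile time. If memory serves, the companion change in include/linux/timer.h repoints TIMER_INITIALIZER at it, roughly as sketched below; this is assumed from context, not shown in this diff:]

#define TIMER_INITIALIZER(_function, _expires, _data) {	\
		.function = (_function),		\
		.expires = (_expires),			\
		.data = (_data),			\
		.base = &boot_tvec_bases,		\
	}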
@@ -139,15 +136,6 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 	list_add_tail(&timer->entry, vec);
 }
 
-typedef struct timer_base_s timer_base_t;
-/*
- * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
- * at compile time, and we need timer->base to lock the timer.
- */
-timer_base_t __init_timer_base
-	____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
-EXPORT_SYMBOL(__init_timer_base);
-
 /***
  * init_timer - initialize a timer.
  * @timer: the timer to be initialized
@@ -158,7 +146,7 @@ EXPORT_SYMBOL(__init_timer_base);
 void fastcall init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
-	timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base;
+	timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
 }
 EXPORT_SYMBOL(init_timer);
 
@@ -174,7 +162,7 @@ static inline void detach_timer(struct timer_list *timer,
 }
 
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
+ * We are using hashed locking: holding per_cpu(tvec_bases).lock
  * means that all timers which are tied to this base via timer->base are
  * locked, and the base itself is locked too.
  *
@@ -185,10 +173,10 @@ static inline void detach_timer(struct timer_list *timer,
  * possible to set timer->base = NULL and drop the lock: the timer remains
  * locked.
  */
-static timer_base_t *lock_timer_base(struct timer_list *timer,
+static tvec_base_t *lock_timer_base(struct timer_list *timer,
 					unsigned long *flags)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 
 	for (;;) {
 		base = timer->base;
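
[For context, the retry loop the comment above describes, reconstructed approximately as the whole function reads after this patch:]

static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;	/* base unchanged under lock: success */
			/* The timer migrated between the read and the lock */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();	/* base was NULL: __mod_timer() is moving it */
	}
}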
@@ -205,8 +193,7 @@ static timer_base_t *lock_timer_base(struct timer_list *timer,
 
 int __mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	timer_base_t *base;
-	tvec_base_t *new_base;
+	tvec_base_t *base, *new_base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -221,7 +208,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	new_base = __get_cpu_var(tvec_bases);
 
-	if (base != &new_base->t_base) {
+	if (base != new_base) {
 		/*
 		 * We are trying to schedule the timer on the local CPU.
 		 * However we can't change timer's base while it is running,
@@ -229,21 +216,19 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 		 * handler yet has not finished. This also guarantees that
 		 * the timer is serialized wrt itself.
 		 */
-		if (unlikely(base->running_timer == timer)) {
-			/* The timer remains on a former base */
-			new_base = container_of(base, tvec_base_t, t_base);
-		} else {
+		if (likely(base->running_timer != timer)) {
 			/* See the comment in lock_timer_base() */
 			timer->base = NULL;
 			spin_unlock(&base->lock);
-			spin_lock(&new_base->t_base.lock);
-			timer->base = &new_base->t_base;
+			base = new_base;
+			spin_lock(&base->lock);
+			timer->base = base;
 		}
 	}
 
 	timer->expires = expires;
-	internal_add_timer(new_base, timer);
-	spin_unlock_irqrestore(&new_base->t_base.lock, flags);
+	internal_add_timer(base, timer);
+	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
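
[The rewritten branch above pairs with lock_timer_base(): publishing timer->base = NULL before either lock changes hands is what keeps a concurrent locker spinning instead of taking a stale base. The handoff ordering, condensed as a sketch rather than the literal patch text:]

	/* Migration handoff in __mod_timer(), condensed: */
	timer->base = NULL;		/* concurrent lockers now spin */
	spin_unlock(&base->lock);	/* release the old CPU's base */
	base = new_base;
	spin_lock(&base->lock);		/* acquire this CPU's base */
	timer->base = base;		/* publish; lockers can proceed */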
@@ -263,10 +248,10 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	unsigned long flags;
 
 	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->t_base.lock, flags);
-	timer->base = &base->t_base;
+	spin_lock_irqsave(&base->lock, flags);
+	timer->base = base;
 	internal_add_timer(base, timer);
-	spin_unlock_irqrestore(&base->t_base.lock, flags);
+	spin_unlock_irqrestore(&base->lock, flags);
 }
 
 
@@ -319,7 +304,7 @@ EXPORT_SYMBOL(mod_timer);
  */
 int del_timer(struct timer_list *timer)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 	unsigned long flags;
 	int ret = 0;
 
@@ -346,7 +331,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-	timer_base_t *base;
+	tvec_base_t *base;
 	unsigned long flags;
 	int ret = -1;
 
@@ -410,7 +395,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
 		struct timer_list *tmp;
 
 		tmp = list_entry(curr, struct timer_list, entry);
-		BUG_ON(tmp->base != &base->t_base);
+		BUG_ON(tmp->base != base);
 		curr = curr->next;
 		internal_add_timer(base, tmp);
 	}
@@ -432,7 +417,7 @@ static inline void __run_timers(tvec_base_t *base)
 {
 	struct timer_list *timer;
 
-	spin_lock_irq(&base->t_base.lock);
+	spin_lock_irq(&base->lock);
 	while (time_after_eq(jiffies, base->timer_jiffies)) {
 		struct list_head work_list = LIST_HEAD_INIT(work_list);
 		struct list_head *head = &work_list;
@@ -458,7 +443,7 @@ static inline void __run_timers(tvec_base_t *base)
 
 			set_running_timer(base, timer);
 			detach_timer(timer, 1);
-			spin_unlock_irq(&base->t_base.lock);
+			spin_unlock_irq(&base->lock);
 			{
 				int preempt_count = preempt_count();
 				fn(data);
@@ -471,11 +456,11 @@ static inline void __run_timers(tvec_base_t *base)
 					BUG();
 				}
 			}
-			spin_lock_irq(&base->t_base.lock);
+			spin_lock_irq(&base->lock);
 		}
 	}
 	set_running_timer(base, NULL);
-	spin_unlock_irq(&base->t_base.lock);
+	spin_unlock_irq(&base->lock);
 }
 
 #ifdef CONFIG_NO_IDLE_HZ
@@ -506,7 +491,7 @@ unsigned long next_timer_interrupt(void)
 	hr_expires += jiffies;
 
 	base = __get_cpu_var(tvec_bases);
-	spin_lock(&base->t_base.lock);
+	spin_lock(&base->lock);
 	expires = base->timer_jiffies + (LONG_MAX >> 1);
 	list = NULL;
 
@@ -554,7 +539,7 @@ found:
 			expires = nte->expires;
 		}
 	}
-	spin_unlock(&base->t_base.lock);
+	spin_unlock(&base->lock);
 
 	if (time_before(hr_expires, expires))
 		return hr_expires;
@@ -841,7 +826,7 @@ void update_process_times(int user_tick)
  */
 static unsigned long count_active_tasks(void)
 {
-	return (nr_running() + nr_uninterruptible()) * FIXED_1;
+	return nr_active() * FIXED_1;
 }
 
 /*
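
[count_active_tasks() feeds the load-average sampling in calc_load(). The nr_active() helper comes in from the scheduler side of this merge; it folds both counters into one pass over the runqueues, approximately as below — assumed from the companion kernel/sched.c change, which is not shown in this diff:]

unsigned long nr_active(void)
{
	unsigned long i, running = 0, uninterruptible = 0;

	for_each_online_cpu(i) {
		running += cpu_rq(i)->nr_running;
		uninterruptible += cpu_rq(i)->nr_uninterruptible;
	}

	/* the per-CPU counters can transiently sum negative */
	if (unlikely((long)uninterruptible < 0))
		uninterruptible = 0;

	return running + uninterruptible;
}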
@@ -1240,29 +1225,37 @@ static int __devinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
+	static char __devinitdata tvec_base_done[NR_CPUS];
 
-	base = per_cpu(tvec_bases, cpu);
-	if (!base) {
+	if (!tvec_base_done[cpu]) {
 		static char boot_done;
 
-		/*
-		 * Cannot do allocation in init_timers as that runs before the
-		 * allocator initializes (and would waste memory if there are
-		 * more possible CPUs than will ever be installed/brought up).
-		 */
 		if (boot_done) {
+			/*
+			 * The APs use this path later in boot
+			 */
 			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
 			memset(base, 0, sizeof(*base));
+			per_cpu(tvec_bases, cpu) = base;
 		} else {
-			base = &boot_tvec_bases;
+			/*
+			 * This is for the boot CPU - we use compile-time
+			 * static initialisation because per-cpu memory isn't
+			 * ready yet and because the memory allocators are not
+			 * initialised either.
+			 */
 			boot_done = 1;
+			base = &boot_tvec_bases;
 		}
-		per_cpu(tvec_bases, cpu) = base;
+		tvec_base_done[cpu] = 1;
+	} else {
+		base = per_cpu(tvec_bases, cpu);
 	}
-	spin_lock_init(&base->t_base.lock);
+
+	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
 		INIT_LIST_HEAD(base->tv4.vec + j);
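
[With every per-CPU slot now statically pointing at boot_tvec_bases, a NULL test can no longer distinguish "never initialised" from "boot base", hence the explicit tvec_base_done[] flag. The resulting decision tree, condensed as a sketch:]

	if (!tvec_base_done[cpu]) {
		if (boot_done)
			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
						cpu_to_node(cpu));	/* secondary CPUs: allocate */
		else
			base = &boot_tvec_bases;	/* boot CPU: static base */
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);	/* re-onlined CPU: reuse */
	}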
@@ -1284,7 +1277,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
 	while (!list_empty(head)) {
 		timer = list_entry(head->next, struct timer_list, entry);
 		detach_timer(timer, 0);
-		timer->base = &new_base->t_base;
+		timer->base = new_base;
 		internal_add_timer(new_base, timer);
 	}
 }
@@ -1300,11 +1293,11 @@ static void __devinit migrate_timers(int cpu)
 	new_base = get_cpu_var(tvec_bases);
 
 	local_irq_disable();
-	spin_lock(&new_base->t_base.lock);
-	spin_lock(&old_base->t_base.lock);
+	spin_lock(&new_base->lock);
+	spin_lock(&old_base->lock);
+
+	BUG_ON(old_base->running_timer);
 
-	if (old_base->t_base.running_timer)
-		BUG();
 	for (i = 0; i < TVR_SIZE; i++)
 		migrate_timer_list(new_base, old_base->tv1.vec + i);
 	for (i = 0; i < TVN_SIZE; i++) {
@@ -1314,8 +1307,8 @@ static void __devinit migrate_timers(int cpu)
 		migrate_timer_list(new_base, old_base->tv5.vec + i);
 	}
 
-	spin_unlock(&old_base->t_base.lock);
-	spin_unlock(&new_base->t_base.lock);
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 	local_irq_enable();
 	put_cpu_var(tvec_bases);
 }
@@ -1471,7 +1464,7 @@ static void time_interpolator_update(long delta_nsec)
 	 */
 	if (jiffies % INTERPOLATOR_ADJUST == 0)
 	{
-		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
 			time_interpolator->nsec_per_cyc--;
 		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
 			time_interpolator->nsec_per_cyc++;
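
[The constant-to-variable switch matters under NTP: tick_nsec is the run-time corrected tick length, whereas TICK_NSEC is fixed at compile time. For reference, the declaration near the top of kernel/timer.c reads roughly:]

unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec), NTP-adjusted at run time */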
@@ -1495,8 +1488,7 @@ register_time_interpolator(struct time_interpolator *ti)
 	unsigned long flags;
 
 	/* Sanity check */
-	if (ti->frequency == 0 || ti->mask == 0)
-		BUG();
+	BUG_ON(ti->frequency == 0 || ti->mask == 0);
 
 	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
 	spin_lock(&time_interpolator_lock);
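
[BUG_ON() is the idiomatic spelling of the open-coded check it replaces; the generic definition expands to approximately:]

#define BUG_ON(condition) do { if (unlikely((condition) != 0)) BUG(); } while (0)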