aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVenki Pallipadi <venkatesh.pallipadi@intel.com>2007-05-08 03:27:44 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-08 14:15:05 -0400
commit6e453a67510a17f01b63835f18569e8c3939a38c (patch)
tree2cbc50f434cf4397d2f279480ea2c2a87defa9b0
parentda6752964290567a6b4ea180d1becda75e810e87 (diff)
Add support for deferrable timers
Introduce a new flag for timers - deferrable: Timers that work normally when system is busy. But, will not cause CPU to come out of idle (just to service this timer), when CPU is idle. Instead, this timer will be serviced when CPU eventually wakes up with a subsequent non-deferrable timer. The main advantage of this is to avoid unnecessary timer interrupts when CPU is idle. If the routine currently called by a timer can wait until next event without any issues, this new timer can be used to setup timer event for that routine. This, with dynticks, allows CPUs to be lazy, allowing them to stay in idle for extended period of time by reducing unnecessary wakeup and thereby reducing the power consumption. This patch: Builds this new timer on top of existing timer infrastructure. It uses last bit in 'base' pointer of timer_list structure to store this deferrable timer flag. __next_timer_interrupt() function skips over these deferrable timers when CPU looks for next timer event for which it has to wake up. This is exported by a new interface init_timer_deferrable() that can be called in place of regular init_timer(). [akpm@linux-foundation.org: Privatise a #define] Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Oleg Nesterov <oleg@tv-sign.ru> Cc: Dave Jones <davej@codemonkey.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/timer.h1
-rw-r--r--kernel/timer.c65
2 files changed, 58 insertions, 8 deletions
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 719113b652dd..e0c5c16c992f 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -37,6 +37,7 @@ extern struct tvec_t_base_s boot_tvec_bases;
37 TIMER_INITIALIZER(_function, _expires, _data) 37 TIMER_INITIALIZER(_function, _expires, _data)
38 38
39void fastcall init_timer(struct timer_list * timer); 39void fastcall init_timer(struct timer_list * timer);
40void fastcall init_timer_deferrable(struct timer_list *timer);
40 41
41static inline void setup_timer(struct timer_list * timer, 42static inline void setup_timer(struct timer_list * timer,
42 void (*function)(unsigned long), 43 void (*function)(unsigned long),
diff --git a/kernel/timer.c b/kernel/timer.c
index b22bd39740dd..dbe966feff2f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -74,7 +74,7 @@ struct tvec_t_base_s {
74 tvec_t tv3; 74 tvec_t tv3;
75 tvec_t tv4; 75 tvec_t tv4;
76 tvec_t tv5; 76 tvec_t tv5;
77} ____cacheline_aligned_in_smp; 77} ____cacheline_aligned;
78 78
79typedef struct tvec_t_base_s tvec_base_t; 79typedef struct tvec_t_base_s tvec_base_t;
80 80
@@ -82,6 +82,37 @@ tvec_base_t boot_tvec_bases;
82EXPORT_SYMBOL(boot_tvec_bases); 82EXPORT_SYMBOL(boot_tvec_bases);
83static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases; 83static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
84 84
85/*
86 * Note that all tvec_bases is 2 byte aligned and lower bit of
87 * base in timer_list is guaranteed to be zero. Use the LSB for
88 * the new flag to indicate whether the timer is deferrable
89 */
90#define TBASE_DEFERRABLE_FLAG (0x1)
91
92/* Functions below help us manage 'deferrable' flag */
93static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
94{
95 return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
96}
97
98static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
99{
100 return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
101}
102
103static inline void timer_set_deferrable(struct timer_list *timer)
104{
105 timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
106 TBASE_DEFERRABLE_FLAG));
107}
108
109static inline void
110timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
111{
112 timer->base = (tvec_base_t *)((unsigned long)(new_base) |
113 tbase_get_deferrable(timer->base));
114}
115
85/** 116/**
86 * __round_jiffies - function to round jiffies to a full second 117 * __round_jiffies - function to round jiffies to a full second
87 * @j: the time in (absolute) jiffies that should be rounded 118 * @j: the time in (absolute) jiffies that should be rounded
@@ -295,6 +326,13 @@ void fastcall init_timer(struct timer_list *timer)
295} 326}
296EXPORT_SYMBOL(init_timer); 327EXPORT_SYMBOL(init_timer);
297 328
329void fastcall init_timer_deferrable(struct timer_list *timer)
330{
331 init_timer(timer);
332 timer_set_deferrable(timer);
333}
334EXPORT_SYMBOL(init_timer_deferrable);
335
298static inline void detach_timer(struct timer_list *timer, 336static inline void detach_timer(struct timer_list *timer,
299 int clear_pending) 337 int clear_pending)
300{ 338{
@@ -325,10 +363,11 @@ static tvec_base_t *lock_timer_base(struct timer_list *timer,
325 tvec_base_t *base; 363 tvec_base_t *base;
326 364
327 for (;;) { 365 for (;;) {
328 base = timer->base; 366 tvec_base_t *prelock_base = timer->base;
367 base = tbase_get_base(prelock_base);
329 if (likely(base != NULL)) { 368 if (likely(base != NULL)) {
330 spin_lock_irqsave(&base->lock, *flags); 369 spin_lock_irqsave(&base->lock, *flags);
331 if (likely(base == timer->base)) 370 if (likely(prelock_base == timer->base))
332 return base; 371 return base;
333 /* The timer has migrated to another CPU */ 372 /* The timer has migrated to another CPU */
334 spin_unlock_irqrestore(&base->lock, *flags); 373 spin_unlock_irqrestore(&base->lock, *flags);
@@ -365,11 +404,11 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
365 */ 404 */
366 if (likely(base->running_timer != timer)) { 405 if (likely(base->running_timer != timer)) {
367 /* See the comment in lock_timer_base() */ 406 /* See the comment in lock_timer_base() */
368 timer->base = NULL; 407 timer_set_base(timer, NULL);
369 spin_unlock(&base->lock); 408 spin_unlock(&base->lock);
370 base = new_base; 409 base = new_base;
371 spin_lock(&base->lock); 410 spin_lock(&base->lock);
372 timer->base = base; 411 timer_set_base(timer, base);
373 } 412 }
374 } 413 }
375 414
@@ -397,7 +436,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
397 timer_stats_timer_set_start_info(timer); 436 timer_stats_timer_set_start_info(timer);
398 BUG_ON(timer_pending(timer) || !timer->function); 437 BUG_ON(timer_pending(timer) || !timer->function);
399 spin_lock_irqsave(&base->lock, flags); 438 spin_lock_irqsave(&base->lock, flags);
400 timer->base = base; 439 timer_set_base(timer, base);
401 internal_add_timer(base, timer); 440 internal_add_timer(base, timer);
402 spin_unlock_irqrestore(&base->lock, flags); 441 spin_unlock_irqrestore(&base->lock, flags);
403} 442}
@@ -550,7 +589,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
550 * don't have to detach them individually. 589 * don't have to detach them individually.
551 */ 590 */
552 list_for_each_entry_safe(timer, tmp, &tv_list, entry) { 591 list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
553 BUG_ON(timer->base != base); 592 BUG_ON(tbase_get_base(timer->base) != base);
554 internal_add_timer(base, timer); 593 internal_add_timer(base, timer);
555 } 594 }
556 595
@@ -636,6 +675,9 @@ static unsigned long __next_timer_interrupt(tvec_base_t *base)
636 index = slot = timer_jiffies & TVR_MASK; 675 index = slot = timer_jiffies & TVR_MASK;
637 do { 676 do {
638 list_for_each_entry(nte, base->tv1.vec + slot, entry) { 677 list_for_each_entry(nte, base->tv1.vec + slot, entry) {
678 if (tbase_get_deferrable(nte->base))
679 continue;
680
639 found = 1; 681 found = 1;
640 expires = nte->expires; 682 expires = nte->expires;
641 /* Look at the cascade bucket(s)? */ 683 /* Look at the cascade bucket(s)? */
@@ -1617,6 +1659,13 @@ static int __devinit init_timers_cpu(int cpu)
1617 cpu_to_node(cpu)); 1659 cpu_to_node(cpu));
1618 if (!base) 1660 if (!base)
1619 return -ENOMEM; 1661 return -ENOMEM;
1662
1663 /* Make sure that tvec_base is 2 byte aligned */
1664 if (tbase_get_deferrable(base)) {
1665 WARN_ON(1);
1666 kfree(base);
1667 return -ENOMEM;
1668 }
1620 memset(base, 0, sizeof(*base)); 1669 memset(base, 0, sizeof(*base));
1621 per_cpu(tvec_bases, cpu) = base; 1670 per_cpu(tvec_bases, cpu) = base;
1622 } else { 1671 } else {
@@ -1658,7 +1707,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
1658 while (!list_empty(head)) { 1707 while (!list_empty(head)) {
1659 timer = list_entry(head->next, struct timer_list, entry); 1708 timer = list_entry(head->next, struct timer_list, entry);
1660 detach_timer(timer, 0); 1709 detach_timer(timer, 0);
1661 timer->base = new_base; 1710 timer_set_base(timer, new_base);
1662 internal_add_timer(new_base, timer); 1711 internal_add_timer(new_base, timer);
1663 } 1712 }
1664} 1713}