author		Venki Pallipadi <venkatesh.pallipadi@intel.com>	2007-05-08 03:27:44 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:15:05 -0400
commit		6e453a67510a17f01b63835f18569e8c3939a38c
tree		2cbc50f434cf4397d2f279480ea2c2a87defa9b0	/kernel/timer.c
parent		da6752964290567a6b4ea180d1becda75e810e87
Add support for deferrable timers
Introduce a new flag for timers - deferrable: a deferrable timer works
normally when the system is busy, but it will not cause the CPU to come out
of idle just to be serviced while the CPU is idle. Instead, it will be
serviced when the CPU eventually wakes up for a subsequent non-deferrable
timer.
The main advantage of this is to avoid unnecessary timer interrupts when the
CPU is idle. If the routine currently called by a timer can wait until the
next event without any issues, this new timer can be used to set up the timer
event for that routine. Together with dynticks, this allows CPUs to be lazy,
letting them stay idle for extended periods of time by reducing unnecessary
wakeups and thereby reducing power consumption.
This patch:
Builds this new timer on top of the existing timer infrastructure. It uses
the last bit of the 'base' pointer in the timer_list structure to store the
deferrable flag. The __next_timer_interrupt() function skips over these
deferrable timers when the CPU looks for the next timer event for which it
has to wake up. This is exported through a new interface,
init_timer_deferrable(), which can be called in place of the regular
init_timer().
[akpm@linux-foundation.org: Privatise a #define]
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
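[Editor's note: the snippet below is an illustrative usage sketch, not part of the patch. It shows how a driver-style polling routine of this era might adopt the new interface; the names poll_timer, poll_timer_fn and poll_timer_setup are hypothetical.]

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	/* Hypothetical low-priority housekeeping that can tolerate being
	 * delayed until the CPU wakes up for some other, non-deferrable
	 * timer.
	 */
	static struct timer_list poll_timer;

	static void poll_timer_fn(unsigned long data)
	{
		/* ... do the low-priority housekeeping work ... */

		/* Re-arm for roughly one second from now; with a deferrable
		 * timer this will not, by itself, pull an idle CPU out of
		 * idle.
		 */
		mod_timer(&poll_timer, round_jiffies(jiffies + HZ));
	}

	static void poll_timer_setup(void)
	{
		init_timer_deferrable(&poll_timer);	/* instead of init_timer() */
		poll_timer.function = poll_timer_fn;
		poll_timer.data = 0;
		mod_timer(&poll_timer, round_jiffies(jiffies + HZ));
	}

The only difference from a normal timer setup is the init call; arming, modifying and deleting the timer work exactly as before, since the flag lives in the otherwise-unused low bit of timer->base.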
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--	kernel/timer.c	65
1 file changed, 57 insertions(+), 8 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index b22bd39740dd..dbe966feff2f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -74,7 +74,7 @@ struct tvec_t_base_s {
 	tvec_t tv3;
 	tvec_t tv4;
 	tvec_t tv5;
-} ____cacheline_aligned_in_smp;
+} ____cacheline_aligned;
 
 typedef struct tvec_t_base_s tvec_base_t;
 
@@ -82,6 +82,37 @@ tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
 static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
 
+/*
+ * Note that all tvec_bases is 2 byte aligned and lower bit of
+ * base in timer_list is guaranteed to be zero. Use the LSB for
+ * the new flag to indicate whether the timer is deferrable
+ */
+#define TBASE_DEFERRABLE_FLAG		(0x1)
+
+/* Functions below help us manage 'deferrable' flag */
+static inline unsigned int tbase_get_deferrable(tvec_base_t *base)
+{
+	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
+}
+
+static inline tvec_base_t *tbase_get_base(tvec_base_t *base)
+{
+	return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
+}
+
+static inline void timer_set_deferrable(struct timer_list *timer)
+{
+	timer->base = ((tvec_base_t *)((unsigned long)(timer->base) |
+				       TBASE_DEFERRABLE_FLAG));
+}
+
+static inline void
+timer_set_base(struct timer_list *timer, tvec_base_t *new_base)
+{
+	timer->base = (tvec_base_t *)((unsigned long)(new_base) |
+				      tbase_get_deferrable(timer->base));
+}
+
 /**
  * __round_jiffies - function to round jiffies to a full second
  * @j: the time in (absolute) jiffies that should be rounded
@@ -295,6 +326,13 @@ void fastcall init_timer(struct timer_list *timer)
 }
 EXPORT_SYMBOL(init_timer);
 
+void fastcall init_timer_deferrable(struct timer_list *timer)
+{
+	init_timer(timer);
+	timer_set_deferrable(timer);
+}
+EXPORT_SYMBOL(init_timer_deferrable);
+
 static inline void detach_timer(struct timer_list *timer,
 				int clear_pending)
 {
@@ -325,10 +363,11 @@ static tvec_base_t *lock_timer_base(struct timer_list *timer,
 	tvec_base_t *base;
 
 	for (;;) {
-		base = timer->base;
+		tvec_base_t *prelock_base = timer->base;
+		base = tbase_get_base(prelock_base);
 		if (likely(base != NULL)) {
 			spin_lock_irqsave(&base->lock, *flags);
-			if (likely(base == timer->base))
+			if (likely(prelock_base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU */
 			spin_unlock_irqrestore(&base->lock, *flags);
@@ -365,11 +404,11 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 		 */
 		if (likely(base->running_timer != timer)) {
 			/* See the comment in lock_timer_base() */
-			timer->base = NULL;
+			timer_set_base(timer, NULL);
 			spin_unlock(&base->lock);
 			base = new_base;
 			spin_lock(&base->lock);
-			timer->base = base;
+			timer_set_base(timer, base);
 		}
 	}
 
@@ -397,7 +436,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(timer_pending(timer) || !timer->function);
 	spin_lock_irqsave(&base->lock, flags);
-	timer->base = base;
+	timer_set_base(timer, base);
 	internal_add_timer(base, timer);
 	spin_unlock_irqrestore(&base->lock, flags);
 }
@@ -550,7 +589,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
 	 * don't have to detach them individually.
 	 */
 	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-		BUG_ON(timer->base != base);
+		BUG_ON(tbase_get_base(timer->base) != base);
 		internal_add_timer(base, timer);
 	}
 
@@ -636,6 +675,9 @@ static unsigned long __next_timer_interrupt(tvec_base_t *base)
 	index = slot = timer_jiffies & TVR_MASK;
 	do {
 		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
+			if (tbase_get_deferrable(nte->base))
+				continue;
+
 			found = 1;
 			expires = nte->expires;
 			/* Look at the cascade bucket(s)? */
@@ -1617,6 +1659,13 @@ static int __devinit init_timers_cpu(int cpu)
 					cpu_to_node(cpu));
 		if (!base)
 			return -ENOMEM;
+
+		/* Make sure that tvec_base is 2 byte aligned */
+		if (tbase_get_deferrable(base)) {
+			WARN_ON(1);
+			kfree(base);
+			return -ENOMEM;
+		}
 		memset(base, 0, sizeof(*base));
 		per_cpu(tvec_bases, cpu) = base;
 	} else {
@@ -1658,7 +1707,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
 	while (!list_empty(head)) {
 		timer = list_entry(head->next, struct timer_list, entry);
 		detach_timer(timer, 0);
-		timer->base = new_base;
+		timer_set_base(timer, new_base);
 		internal_add_timer(new_base, timer);
 	}
 }
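[Editor's note: the tbase_* helpers added above are an instance of the common low-bit pointer-tagging idiom: because tvec_base_t is at least 2-byte aligned, bit 0 of any pointer to it is guaranteed to be zero and can carry a flag. The standalone sketch below illustrates the same idea with hypothetical names; it is not kernel code.]

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FLAG_DEFERRABLE	0x1UL

	struct base { int dummy; };	/* stand-in for tvec_base_t */

	/* Pack a flag into the guaranteed-zero low bit of an aligned pointer. */
	static inline struct base *tag(struct base *p, unsigned long flag)
	{
		return (struct base *)((uintptr_t)p | flag);
	}

	/* Read the flag back out of the tagged pointer. */
	static inline unsigned long get_flag(struct base *p)
	{
		return (uintptr_t)p & FLAG_DEFERRABLE;
	}

	/* Strip the flag to recover the real pointer before dereferencing. */
	static inline struct base *strip(struct base *p)
	{
		return (struct base *)((uintptr_t)p & ~FLAG_DEFERRABLE);
	}

	int main(void)
	{
		static struct base b;	/* suitably aligned by the compiler */
		struct base *tagged;

		assert(((uintptr_t)&b & FLAG_DEFERRABLE) == 0);

		tagged = tag(&b, FLAG_DEFERRABLE);
		printf("flag=%lu, same object=%d\n",
		       get_flag(tagged), strip(tagged) == &b);
		return 0;
	}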