author     Eric Dumazet <eric.dumazet@gmail.com>    2009-06-11 16:10:07 -0400
committer  David S. Miller <davem@davemloft.net>    2009-06-14 02:36:31 -0400
commit     125bb8f5637bd653244728f734bcac218986d910
tree       1d78145dd39b13d2dcc0eb5426b8d1c18b035f03 /net/ipv4
parent     13be8a126850692839934116fbdaf008bfdedec1
net: use a deferred timer in rt_check_expire
For the sake of power saving, use a deferrable timer to fire
rt_check_expire().

Since the cache equilibrium on some big routers depends on garbage
collection being done on time, we take into account the elapsed time
between two rt_check_expire() invocations to adjust the number of
slots we have to check.
Based on an initial idea and patch from Tero Kristo
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Tero Kristo <tero.kristo@nokia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
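
A minimal stand-alone sketch of the pattern the patch switches to, assuming a
2.6.30-era kernel module context; the names scan_work, scan_work_func,
scan_last_jiffies and SCAN_INTERVAL are illustrative, not part of the patch.
A deferrable delayed work item does not wake an idle CPU on its own, so the
handler measures how late it really ran (delta) instead of assuming a fixed
period, the way the patched rt_check_expire() does:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define SCAN_INTERVAL (60 * HZ)         /* illustrative period */

static struct delayed_work scan_work;
static unsigned long scan_last_jiffies;

static void scan_work_func(struct work_struct *work)
{
        /* A deferrable timer may fire late on an idle system: compute how
         * much time really elapsed and scale the work by it, instead of
         * assuming exactly SCAN_INTERVAL has passed.
         */
        unsigned long delta = jiffies - scan_last_jiffies;

        scan_last_jiffies = jiffies;
        printk(KERN_INFO "scan ran after %lu jiffies\n", delta);

        schedule_delayed_work(&scan_work, SCAN_INTERVAL);
}

static int __init scan_init(void)
{
        /* Deferrable: the work runs when the CPU wakes for another reason. */
        INIT_DELAYED_WORK_DEFERRABLE(&scan_work, scan_work_func);
        scan_last_jiffies = jiffies;
        schedule_delayed_work(&scan_work, SCAN_INTERVAL);
        return 0;
}

static void __exit scan_exit(void)
{
        cancel_delayed_work_sync(&scan_work);
}

module_init(scan_init);
module_exit(scan_exit);
MODULE_LICENSE("GPL");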
Diffstat (limited to 'net/ipv4')
 net/ipv4/route.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a849bb15d864..cd76b3cb7092 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -131,8 +131,8 @@ static int ip_rt_min_advmss __read_mostly = 256;
 static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
 static int rt_chain_length_max __read_mostly = 20;
 
-static void rt_worker_func(struct work_struct *work);
-static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
 
 /*
  *      Interface to generic destination cache.
@@ -787,9 +787,12 @@ static void rt_check_expire(void)
         struct rtable *rth, *aux, **rthp;
         unsigned long samples = 0;
         unsigned long sum = 0, sum2 = 0;
+        unsigned long delta;
         u64 mult;
 
-        mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
+        delta = jiffies - expires_ljiffies;
+        expires_ljiffies = jiffies;
+        mult = ((u64)delta) << rt_hash_log;
         if (ip_rt_gc_timeout > 1)
                 do_div(mult, ip_rt_gc_timeout);
         goal = (unsigned int)mult;
@@ -3397,6 +3400,8 @@ int __init ip_rt_init(void)
         /* All the timers, started at system startup tend
            to synchronize. Perturb it a bit.
          */
+        INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+        expires_ljiffies = jiffies;
         schedule_delayed_work(&expires_work,
                 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
 
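
To see what the new goal computation does, here is a hedged user-space
arithmetic check, not kernel code: only the formula
goal = (delta << rt_hash_log) / ip_rt_gc_timeout comes from the patch, while
HZ, the hash size (2^17 buckets) and the 300-second gc timeout are assumed
example values. When the deferrable timer fires late, delta grows and
proportionally more hash slots are scanned, so the cache still converges:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000  /* assumed tick rate for the example */

/* Mirror of the patched goal computation in rt_check_expire(). */
static unsigned int scan_goal(uint64_t delta_jiffies,
                              unsigned int rt_hash_log,
                              unsigned long ip_rt_gc_timeout)
{
        uint64_t mult = delta_jiffies << rt_hash_log;

        if (ip_rt_gc_timeout > 1)
                mult /= ip_rt_gc_timeout;       /* do_div() in the kernel */
        return (unsigned int)mult;
}

int main(void)
{
        /* Fired on time (60 s elapsed) vs. deferred an extra 30 s (90 s). */
        printf("on time:  %u slots\n", scan_goal(60ULL * HZ, 17, 300UL * HZ));
        printf("deferred: %u slots\n", scan_goal(90ULL * HZ, 17, 300UL * HZ));
        /* Prints roughly 26214 and 39321 slots respectively. */
        return 0;
}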