author     Viresh Kumar <viresh.kumar@linaro.org>    2014-01-22 01:53:32 -0500
committer  David S. Miller <davem@davemloft.net>     2014-01-23 00:57:05 -0500
commit     906e073f3e842877b59d669b25aa76f65ba775b3 (patch)
tree       a6ab362f3047e8ac28516b4ecb2d5c9b518c1966 /net/ipv4
parent     7c90cc2d40cab15adc78545edba8b5996bd4cade (diff)
net/ipv4: queue work on power efficient wq
The workqueue used in the ipv4 layer has no real dependency on running on the
CPU that queued it.

On an idle system, an idle CPU is observed to wake up many times just to
service this work. It would be better to queue it on whichever CPU the
scheduler believes to be the most appropriate one.

This patch replaces the normal workqueue with the power-efficient version.
This doesn't change the existing behavior of the code unless
CONFIG_WQ_POWER_EFFICIENT is enabled.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
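For reference, a minimal sketch of what the swap amounts to. The helper name
queue_on_power_efficient_wq below is hypothetical and exists only for
illustration; the real change simply calls queue_delayed_work() with
system_power_efficient_wq instead of the schedule_delayed_work() wrapper,
which queues on the default system_wq.

#include <linux/workqueue.h>

/*
 * Hypothetical helper, for illustration only: schedule_delayed_work(dwork,
 * delay) is, in effect, queue_delayed_work(system_wq, dwork, delay), so the
 * patch only swaps the target workqueue, not the queueing semantics.
 */
static bool queue_on_power_efficient_wq(struct delayed_work *dwork,
					unsigned long delay)
{
	/*
	 * system_power_efficient_wq behaves like system_wq unless the
	 * power-efficient workqueue mode is enabled, in which case its work
	 * items may run as unbound work on whichever CPU the scheduler
	 * prefers.
	 */
	return queue_delayed_work(system_power_efficient_wq, dwork, delay);
}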
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/devinet.c | 10
1 file changed, 6 insertions, 4 deletions
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 646023bd5449..ac2dff3c2c1c 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -474,7 +474,7 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
 	inet_hash_insert(dev_net(in_dev->dev), ifa);
 
 	cancel_delayed_work(&check_lifetime_work);
-	schedule_delayed_work(&check_lifetime_work, 0);
+	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
 
 	/* Send message first, then call notifier.
 	   Notifier will trigger FIB update, so that
@@ -684,7 +684,8 @@ static void check_lifetime(struct work_struct *work)
 	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
 		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
 
-	schedule_delayed_work(&check_lifetime_work, next_sched - now);
+	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
+			next_sched - now);
 }
 
 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
@@ -842,7 +843,8 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
 		ifa = ifa_existing;
 		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
 		cancel_delayed_work(&check_lifetime_work);
-		schedule_delayed_work(&check_lifetime_work, 0);
+		queue_delayed_work(system_power_efficient_wq,
+				   &check_lifetime_work, 0);
 		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
 		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 	}
@@ -2322,7 +2324,7 @@ void __init devinet_init(void)
 	register_gifconf(PF_INET, inet_gifconf);
 	register_netdevice_notifier(&ip_netdev_notifier);
 
-	schedule_delayed_work(&check_lifetime_work, 0);
+	queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
 
 	rtnl_af_register(&inet_af_ops);
 
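The hunks above all follow the same cancel-then-requeue pattern around the
single check_lifetime_work item. As a standalone illustration of that pattern,
here is a minimal kernel-module-style sketch; it is not part of the patch, and
all lifetime_demo_* names are hypothetical, chosen only to mirror the shape of
the devinet.c code.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void lifetime_demo_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(lifetime_demo_work, lifetime_demo_fn);

static void lifetime_demo_fn(struct work_struct *work)
{
	/*
	 * Do the periodic check, then re-arm on the power-efficient
	 * workqueue, mirroring check_lifetime() in the patch.
	 */
	queue_delayed_work(system_power_efficient_wq, &lifetime_demo_work,
			   msecs_to_jiffies(1000));
}

static int __init lifetime_demo_init(void)
{
	/*
	 * Same pattern as __inet_insert_ifa(): cancel any pending run,
	 * then queue an immediate one on system_power_efficient_wq.
	 */
	cancel_delayed_work(&lifetime_demo_work);
	queue_delayed_work(system_power_efficient_wq, &lifetime_demo_work, 0);
	return 0;
}

static void __exit lifetime_demo_exit(void)
{
	cancel_delayed_work_sync(&lifetime_demo_work);
}

module_init(lifetime_demo_init);
module_exit(lifetime_demo_exit);
MODULE_LICENSE("GPL");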