author     Viresh Kumar <viresh.kumar@linaro.org>    2013-04-24 07:42:55 -0400
committer  Tejun Heo <tj@kernel.org>                 2013-05-14 13:50:06 -0400
commit     bbb47bdeae756f04b896b55b51f230f3eb21f207 (patch)
tree       130e58aa4301ec10e3e31c9bdb8d56360f142f85 /drivers/net
parent     0668106ca3865ba945e155097fb042bf66d364d3 (diff)
PHYLIB: queue work on system_power_efficient_wq
Phylib uses workqueues for multiple purposes. There is no real dependency on
running these work items on the CPU that scheduled them. On an idle system, it
is observed that an idle CPU wakes up many times just to service this work. It
would be better if we could schedule it on a CPU which the scheduler believes
to be the most appropriate one.

This patch replaces system_wq with system_power_efficient_wq for PHYLIB.

Cc: David S. Miller <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Tejun Heo <tj@kernel.org>
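For illustration, here is a minimal, self-contained sketch (not part of this
patch) of the pattern the change adopts: re-arming a periodic delayed work item
on system_power_efficient_wq instead of going through schedule_delayed_work(),
which targets system_wq. The my_poll_work and my_poll_fn names are hypothetical.

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static struct delayed_work my_poll_work;

static void my_poll_fn(struct work_struct *work)
{
	/* Periodic housekeeping with no affinity to the queueing CPU. */
	pr_info("poll tick\n");

	/* Re-arm on the power-efficient queue, one second from now. */
	queue_delayed_work(system_power_efficient_wq, &my_poll_work, HZ);
}

static int __init my_poll_init(void)
{
	INIT_DELAYED_WORK(&my_poll_work, my_poll_fn);
	queue_delayed_work(system_power_efficient_wq, &my_poll_work, HZ);
	return 0;
}

static void __exit my_poll_exit(void)
{
	/* Handles self-requeueing work: cancels and waits for completion. */
	cancel_delayed_work_sync(&my_poll_work);
}

module_init(my_poll_init);
module_exit(my_poll_exit);
MODULE_LICENSE("GPL");

When power-efficient workqueues are not enabled (CONFIG_WQ_POWER_EFFICIENT_DEFAULT
or the workqueue.power_efficient boot parameter), system_power_efficient_wq behaves
like an ordinary per-CPU workqueue, so the change is effectively a no-op there;
when they are enabled, the work runs unbound and the scheduler is free to place it
on the most power-appropriate CPU.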
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/phy/phy.c  9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c14f14741b3f..984c0b5ba174 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -439,7 +439,7 @@ void phy_start_machine(struct phy_device *phydev,
 {
 	phydev->adjust_state = handler;
 
-	schedule_delayed_work(&phydev->state_queue, HZ);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
 }
 
 /**
@@ -500,7 +500,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	disable_irq_nosync(irq);
 	atomic_inc(&phydev->irq_disable);
 
-	schedule_work(&phydev->phy_queue);
+	queue_work(system_power_efficient_wq, &phydev->phy_queue);
 
 	return IRQ_HANDLED;
 }
@@ -655,7 +655,7 @@ static void phy_change(struct work_struct *work)
 
 	/* reschedule state queue work to run as soon as possible */
 	cancel_delayed_work_sync(&phydev->state_queue);
-	schedule_delayed_work(&phydev->state_queue, 0);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 
 	return;
 
@@ -918,7 +918,8 @@ void phy_state_machine(struct work_struct *work)
 	if (err < 0)
 		phy_error(phydev);
 
-	schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+			   PHY_STATE_TIME * HZ);
 }
 
 static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,