aboutsummaryrefslogtreecommitdiffstats
path: root/net/rfkill/core.c
diff options
context:
space:
mode:
authorShaibal Dutta <shaibal.dutta@broadcom.com>2014-01-30 17:43:34 -0500
committerJohannes Berg <johannes.berg@intel.com>2014-02-04 15:58:16 -0500
commit67235cbca44f082e9c4c2ed370f9afe5fc478d49 (patch)
treedfbb6acb68b824afdab6d106da35b5b7cddd024b /net/rfkill/core.c
parent845f3351b15a4cd8c6e47255c0dbfac03c6aceda (diff)
net: rfkill: move poll work to power efficient workqueue
This patch moves the rfkill poll_work to the power efficient workqueue.
This work does not have to be bound to the CPU that scheduled it, hence
the selection of CPU that executes it would be left to the scheduler.
Net result is that CPU idle times would be extended, resulting in power
savings. This behaviour is enabled when CONFIG_WQ_POWER_EFFICIENT is
selected.

Cc: "John W. Linville" <linville@tuxdriver.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Shaibal Dutta <shaibal.dutta@broadcom.com>
[zoran.markovic@linaro.org: Rebased to latest kernel, added commit
message. Fixed workqueue selection after suspend/resume cycle.]
Signed-off-by: Zoran Markovic <zoran.markovic@linaro.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/rfkill/core.c')
-rw-r--r--net/rfkill/core.c9
1 file changed, 6 insertions, 3 deletions
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ed7e0b4e7f90..b3b16c070a7f 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -789,7 +789,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
 	if (!rfkill->ops->poll)
 		return;
 
-	schedule_work(&rfkill->poll_work.work);
+	queue_delayed_work(system_power_efficient_wq,
+			   &rfkill->poll_work, 0);
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
@@ -894,7 +895,8 @@ static void rfkill_poll(struct work_struct *work)
 	 */
 	rfkill->ops->poll(rfkill, rfkill->data);
 
-	schedule_delayed_work(&rfkill->poll_work,
+	queue_delayed_work(system_power_efficient_wq,
+			   &rfkill->poll_work,
 			round_jiffies_relative(POLL_INTERVAL));
 }
 
@@ -958,7 +960,8 @@ int __must_check rfkill_register(struct rfkill *rfkill)
 	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
 
 	if (rfkill->ops->poll)
-		schedule_delayed_work(&rfkill->poll_work,
+		queue_delayed_work(system_power_efficient_wq,
+				   &rfkill->poll_work,
 			round_jiffies_relative(POLL_INTERVAL));
 
 	if (!rfkill->persistent || rfkill_epo_lock_active) {