author    Tejun Heo <tj@kernel.org>                          2011-01-02 22:49:25 -0500
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-01-20 22:08:34 -0500
commit    b18ae08deac23187e4a22a8c94a1a473be8e8c93 (patch)
tree      d403d80de1f889c7ef5a9357a5c9da1b876b8508 /arch
parent    f32be0c54057faac90e0b5e9e13fa9f8fab127ac (diff)
powerpc/cell: Use system_wq in cpufreq_spudemand
With cmwq, there's no reason to use a separate workqueue in
cpufreq_spudemand.  Use system_wq instead.  The work items are already
sync canceled on stop, so it's already guaranteed that no work is
running when spu_gov_exit() is entered.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: Dave Jones <davej@redhat.com>
Cc: cpufreq@vger.kernel.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
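The conversion described above reduces to a small, generic pattern: queue the self-rearming delayed work on the shared system workqueue with schedule_delayed_work_on(), and rely on cancel_delayed_work_sync() at stop time to guarantee nothing is left running, so no private workqueue has to be created or destroyed. The sketch below is not part of the commit; it is a minimal, hypothetical illustration of that pattern (all demo_* names are invented here), using only standard workqueue APIs.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_poller {
	struct delayed_work work;	/* self-rearming work item */
	unsigned int poll_int_us;	/* polling interval, in microseconds */
	int cpu;			/* CPU the work should run on */
};

static void demo_poll_fn(struct work_struct *work)
{
	struct demo_poller *p = container_of(work, struct demo_poller, work.work);

	/* ... periodic work goes here ... */

	/* Re-arm on the same CPU via the shared system workqueue. */
	schedule_delayed_work_on(p->cpu, &p->work,
				 usecs_to_jiffies(p->poll_int_us));
}

static void demo_poller_start(struct demo_poller *p)
{
	INIT_DELAYED_WORK(&p->work, demo_poll_fn);
	schedule_delayed_work_on(p->cpu, &p->work,
				 usecs_to_jiffies(p->poll_int_us));
}

static void demo_poller_stop(struct demo_poller *p)
{
	/*
	 * Synchronous cancel: on return, no instance of demo_poll_fn() is
	 * running and the work will not re-queue itself, so there is no
	 * separate workqueue to flush or destroy.
	 */
	cancel_delayed_work_sync(&p->work);
}

Because system_wq is shared, this only fits work items that are short and infrequent and need no dedicated flushing or ordering guarantees, which is exactly the case the commit message makes for cpufreq_spudemand.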
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/time.c                       25
-rw-r--r--  arch/powerpc/platforms/cell/cpufreq_spudemand.c  20
2 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09e4dea4a85a..09d31dbf43f9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -265,11 +265,26 @@ void accumulate_stolen_time(void)
 {
 	u64 sst, ust;
 
-	sst = scan_dispatch_log(get_paca()->starttime_user);
-	ust = scan_dispatch_log(get_paca()->starttime);
-	get_paca()->system_time -= sst;
-	get_paca()->user_time -= ust;
-	get_paca()->stolen_time += ust + sst;
+	u8 save_soft_enabled = local_paca->soft_enabled;
+	u8 save_hard_enabled = local_paca->hard_enabled;
+
+	/* We are called early in the exception entry, before
+	 * soft/hard_enabled are sync'ed to the expected state
+	 * for the exception. We are hard disabled but the PACA
+	 * needs to reflect that so various debug stuff doesn't
+	 * complain
+	 */
+	local_paca->soft_enabled = 0;
+	local_paca->hard_enabled = 0;
+
+	sst = scan_dispatch_log(local_paca->starttime_user);
+	ust = scan_dispatch_log(local_paca->starttime);
+	local_paca->system_time -= sst;
+	local_paca->user_time -= ust;
+	local_paca->stolen_time += ust + sst;
+
+	local_paca->soft_enabled = save_soft_enabled;
+	local_paca->hard_enabled = save_hard_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 968c1c0b4d5b..d809836bcf5f 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -39,8 +39,6 @@ struct spu_gov_info_struct {
 };
 static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
 
-static struct workqueue_struct *kspugov_wq;
-
 static int calc_freq(struct spu_gov_info_struct *info)
 {
 	int cpu;
@@ -71,14 +69,14 @@ static void spu_gov_work(struct work_struct *work)
 	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
 
 	delay = usecs_to_jiffies(info->poll_int);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
 	int delay = usecs_to_jiffies(info->poll_int);
 	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
@@ -152,27 +150,15 @@ static int __init spu_gov_init(void)
 {
 	int ret;
 
-	kspugov_wq = create_workqueue("kspugov");
-	if (!kspugov_wq) {
-		printk(KERN_ERR "creation of kspugov failed\n");
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = cpufreq_register_governor(&spu_governor);
-	if (ret) {
+	if (ret)
 		printk(KERN_ERR "registration of governor failed\n");
-		destroy_workqueue(kspugov_wq);
-		goto out;
-	}
-out:
 	return ret;
 }
 
 static void __exit spu_gov_exit(void)
 {
 	cpufreq_unregister_governor(&spu_governor);
-	destroy_workqueue(kspugov_wq);
 }
 
 