author     Linus Torvalds <torvalds@linux-foundation.org>  2012-09-19 14:00:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-09-19 14:00:07 -0400
commit     c5c473e29c641380aef4a9d1f9c39de49219980f
tree       7cc1d52fa7757ecd0903fc6e86bb22188d2a8bbd
parent     925a6f0bf8bd122d5d2429af7f0ca0fecf4ae71f
parent     6889125b8b4e09c5e53e6ecab3433bed1ce198c9
Merge branch 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue / powernow-k8 fix from Tejun Heo:
"This is the fix for the bug where cpufreq/powernow-k8 was tripping
BUG_ON() in try_to_wake_up_local() by migrating workqueue worker to a
different CPU.
https://bugzilla.kernel.org/show_bug.cgi?id=47301
As discussed, the fix now comes in two parts - one reimplements
work_on_cpu() so that it doesn't create a new kthread each time, and
the actual fix makes powernow-k8 use work_on_cpu() instead of
performing manual migration.
While pretty late in the merge cycle, both changes are on the safer
side. Jiri and I verified two existing users of work_on_cpu() and
Duncan confirmed that the powernow-k8 fix survived about 18 hours of
testing."
* 'for-3.6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
cpufreq/powernow-k8: workqueue user shouldn't migrate the kworker to another CPU
workqueue: reimplement work_on_cpu() using system_wq
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 63
-rw-r--r--  kernel/workqueue.c            | 25
2 files changed, 42 insertions(+), 46 deletions(-)
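As context for the powernow-k8 change below: work_on_cpu() runs a function on a
specific CPU via a CPU-bound workqueue worker and hands back its return value,
so callers no longer have to migrate themselves with set_cpus_allowed_ptr().
A minimal sketch of a caller, with hypothetical names not part of this commit:

/*
 * Hypothetical example of the work_on_cpu() pattern used by the fix
 * below -- not part of this commit.  Queries a value on @cpu without
 * migrating the calling task.
 */
#include <linux/smp.h>
#include <linux/workqueue.h>

static long query_fn(void *arg)
{
	/* Runs on the target CPU in workqueue context; may sleep. */
	return (long)smp_processor_id();
}

static long query_on_cpu(unsigned int cpu)
{
	/* Sleeps until query_fn() has finished on @cpu. */
	return work_on_cpu(cpu, query_fn, NULL);
}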
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index c0e816468e3..1a40935c85f 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>	/* for current / set_cpus_allowed() */
 #include <linux/io.h>
 #include <linux/delay.h>
 
@@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
-		unsigned targfreq, unsigned relation)
+struct powernowk8_target_arg {
+	struct cpufreq_policy *pol;
+	unsigned targfreq;
+	unsigned relation;
+};
+
+static long powernowk8_target_fn(void *arg)
 {
-	cpumask_var_t oldmask;
+	struct powernowk8_target_arg *pta = arg;
+	struct cpufreq_policy *pol = pta->pol;
+	unsigned targfreq = pta->targfreq;
+	unsigned relation = pta->relation;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
 	unsigned int newstate;
-	int ret = -EIO;
+	int ret;
 
 	if (!data)
 		return -EINVAL;
@@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	checkfid = data->currfid;
 	checkvid = data->currvid;
 
-	/* only run on specific CPU from here on. */
-	/* This is poor form: use a workqueue or smp_call_function_single */
-	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(oldmask, tsk_cpus_allowed(current));
-	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
-
-	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-		goto err_out;
-	}
-
 	if (pending_bit_stuck()) {
 		printk(KERN_ERR PFX "failing targ, change pending bit set\n");
-		goto err_out;
+		return -EIO;
 	}
 
 	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
 		pol->cpu, targfreq, pol->min, pol->max, relation);
 
 	if (query_current_values_with_pending_wait(data))
-		goto err_out;
+		return -EIO;
 
 	if (cpu_family != CPU_HW_PSTATE) {
 		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
 			targfreq, relation, &newstate))
-		goto err_out;
+		return -EIO;
 
 	mutex_lock(&fidvid_mutex);
 
@@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	ret = transition_frequency_fidvid(data, newstate);
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
-		ret = 1;
 		mutex_unlock(&fidvid_mutex);
-		goto err_out;
+		return 1;
 	}
 	mutex_unlock(&fidvid_mutex);
 
@@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 		data->powernow_table[newstate].index);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
-	ret = 0;
 
-err_out:
-	set_cpus_allowed_ptr(current, oldmask);
-	free_cpumask_var(oldmask);
-	return ret;
+	return 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+		unsigned targfreq, unsigned relation)
+{
+	struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+					     .relation = relation };
+
+	/*
+	 * Must run on @pol->cpu.  cpufreq core is responsible for ensuring
+	 * that we're bound to the current CPU and pol->cpu stays online.
+	 */
+	if (smp_processor_id() == pol->cpu)
+		return powernowk8_target_fn(&pta);
+	else
+		return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 
 /* Driver entry point to verify the policy and range of frequencies */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b80065a2450..3c5a79e2134 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3576,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-	struct completion completion;
+	struct work_struct work;
 	long (*fn)(void *);
 	void *arg;
 	long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-	struct work_for_cpu *wfc = _wfc;
+	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
 	wfc->ret = wfc->fn(wfc->arg);
-	complete(&wfc->completion);
-	return 0;
 }
 
 /**
@@ -3602,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-	struct task_struct *sub_thread;
-	struct work_for_cpu wfc = {
-		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-		.fn = fn,
-		.arg = arg,
-	};
+	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-	if (IS_ERR(sub_thread))
-		return PTR_ERR(sub_thread);
-	kthread_bind(sub_thread, cpu);
-	wake_up_process(sub_thread);
-	wait_for_completion(&wfc.completion);
+	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+	schedule_work_on(cpu, &wfc.work);
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
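The reimplementation above relies on the on-stack work item pattern:
INIT_WORK_ONSTACK() initializes a work_struct that lives on the caller's
stack, schedule_work_on() queues it on the system workqueue of the target
CPU, and flush_work() guarantees the function has completed before the
stack frame is unwound. A sketch of the same pattern in isolation, with
hypothetical names not taken from this commit:

/*
 * Hypothetical sketch of the on-stack work pattern used by the new
 * work_on_cpu() -- not part of this commit.
 */
#include <linux/workqueue.h>

struct square_req {
	struct work_struct work;
	long in;
	long out;
};

static void square_fn(struct work_struct *work)
{
	struct square_req *r = container_of(work, struct square_req, work);

	r->out = r->in * r->in;
}

static long square_on_cpu(unsigned int cpu, long in)
{
	struct square_req r = { .in = in };

	INIT_WORK_ONSTACK(&r.work, square_fn);
	schedule_work_on(cpu, &r.work);
	/* flush_work() ensures square_fn() finished before &r goes away. */
	flush_work(&r.work);
	return r.out;
}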