author		Nicolas Pitre <nicolas.pitre@linaro.org>	2012-10-26 02:36:17 -0400
committer	Nicolas Pitre <nicolas.pitre@linaro.org>	2013-07-30 09:02:14 -0400
commit		71ce1deeff8f9341ae3b21983e9bdde28e8c96fe (patch)
tree		b4beef1523de4e5f4fb7e6b13a553e2ed3bc9ad1
parent		3f09d4799ecc076cccc11ab2333a36ec849d24f5 (diff)
ARM: bL_switcher: move to dedicated threads rather than workqueues
The workqueues are problematic as they may be contended. They can't be
scheduled with top priority either. Also the optimization in
bL_switch_request() to skip the workqueue entirely when the target CPU
and the calling CPU were the same didn't allow for bL_switch_request()
to be called from atomic context, as might be the case for some cpufreq
drivers.

Let's move to dedicated kthreads instead.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
-rw-r--r--	arch/arm/common/bL_switcher.c		101
-rw-r--r--	arch/arm/include/asm/bL_switcher.h	  2
2 files changed, 79 insertions(+), 24 deletions(-)
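For context, here is a minimal, self-contained sketch of the
kthread-plus-waitqueue handoff pattern the patch below adopts. It is
not from this patch and all demo_* names are hypothetical; only the
shape of the code mirrors the patch: the consumer sleeps in
wait_event_interruptible(), claims the pending request atomically with
xchg(), and the producer only does a store plus wake_up(), which is
safe from atomic context.

	// SPDX-License-Identifier: GPL-2.0
	/* Hypothetical module-style sketch of the handoff pattern. */
	#include <linux/module.h>
	#include <linux/kthread.h>
	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include <linux/err.h>

	static struct task_struct *demo_task;
	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
	static int demo_wanted = -1;

	static int demo_thread(void *arg)
	{
		int req;

		while (!kthread_should_stop()) {
			wait_event_interruptible(demo_wq,
					demo_wanted != -1 || kthread_should_stop());
			req = xchg(&demo_wanted, -1);	/* claim and clear atomically */
			if (req != -1)
				pr_info("demo: handling request %d\n", req);
		}
		return 0;
	}

	/* Producer side: only a store plus wake_up(), so callable from
	 * atomic context. */
	static void demo_request(int value)
	{
		demo_wanted = value;
		wake_up(&demo_wq);
	}

	static int __init demo_init(void)
	{
		demo_task = kthread_run(demo_thread, NULL, "demo_thread");
		if (IS_ERR(demo_task))
			return PTR_ERR(demo_task);
		demo_request(42);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		kthread_stop(demo_task);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Note the same caveat as the patch itself: back-to-back requests posted
before the thread runs coalesce into one, with the last writer winning.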
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index ca04b5384bb0..407c4cc64c0b 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -15,8 +15,10 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/cpu_pm.h>
+#include <linux/cpu.h>
 #include <linux/cpumask.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
 #include <linux/clockchips.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
@@ -219,15 +221,48 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	return ret;
 }
 
-struct switch_args {
-	unsigned int cluster;
-	struct work_struct work;
+struct bL_thread {
+	struct task_struct *task;
+	wait_queue_head_t wq;
+	int wanted_cluster;
 };
 
-static void __bL_switch_to(struct work_struct *work)
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+	struct bL_thread *t = arg;
+	struct sched_param param = { .sched_priority = 1 };
+	int cluster;
+
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+
+	do {
+		if (signal_pending(current))
+			flush_signals(current);
+		wait_event_interruptible(t->wq,
+				t->wanted_cluster != -1 ||
+				kthread_should_stop());
+		cluster = xchg(&t->wanted_cluster, -1);
+		if (cluster != -1)
+			bL_switch_to(cluster);
+	} while (!kthread_should_stop());
+
+	return 0;
+}
+
+static struct task_struct * __init bL_switcher_thread_create(int cpu, void *arg)
 {
-	struct switch_args *args = container_of(work, struct switch_args, work);
-	bL_switch_to(args->cluster);
+	struct task_struct *task;
+
+	task = kthread_create_on_node(bL_switcher_thread, arg,
+				      cpu_to_node(cpu), "kswitcher_%d", cpu);
+	if (!IS_ERR(task)) {
+		kthread_bind(task, cpu);
+		wake_up_process(task);
+	} else
+		pr_err("%s failed for CPU %d\n", __func__, cpu);
+	return task;
 }
 
 /*
@@ -236,26 +271,46 @@ static void __bL_switch_to(struct work_struct *work)
  * @cpu: the CPU to switch
  * @new_cluster_id: the ID of the cluster to switch to.
  *
- * This function causes a cluster switch on the given CPU. If the given
- * CPU is the same as the calling CPU then the switch happens right away.
- * Otherwise the request is put on a work queue to be scheduled on the
- * remote CPU.
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread. This function may or may not return
+ * before the switch has occurred.
  */
-void bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
 {
-	unsigned int this_cpu = get_cpu();
-	struct switch_args args;
+	struct bL_thread *t;
 
-	if (cpu == this_cpu) {
-		bL_switch_to(new_cluster_id);
-		put_cpu();
-		return;
+	if (cpu >= ARRAY_SIZE(bL_threads)) {
+		pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+		return -EINVAL;
 	}
-	put_cpu();
 
-	args.cluster = new_cluster_id;
-	INIT_WORK_ONSTACK(&args.work, __bL_switch_to);
-	schedule_work_on(cpu, &args.work);
-	flush_work(&args.work);
+	t = &bL_threads[cpu];
+	if (IS_ERR(t->task))
+		return PTR_ERR(t->task);
+	if (!t->task)
+		return -ESRCH;
+
+	t->wanted_cluster = new_cluster_id;
+	wake_up(&t->wq);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(bL_switch_request);
+
+static int __init bL_switcher_init(void)
+{
+	int cpu;
+
+	pr_info("big.LITTLE switcher initializing\n");
+
+	for_each_online_cpu(cpu) {
+		struct bL_thread *t = &bL_threads[cpu];
+		init_waitqueue_head(&t->wq);
+		t->wanted_cluster = -1;
+		t->task = bL_switcher_thread_create(cpu, t);
+	}
+
+	pr_info("big.LITTLE switcher initialized\n");
+	return 0;
+}
+
+late_initcall(bL_switcher_init);
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
index 72efe3f349b9..e0c0bba70bbf 100644
--- a/arch/arm/include/asm/bL_switcher.h
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -12,6 +12,6 @@
 #ifndef ASM_BL_SWITCHER_H
 #define ASM_BL_SWITCHER_H
 
-void bL_switch_request(unsigned int cpu, unsigned int new_cluster_id);
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id);
 
 #endif
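With the prototype now returning int, a caller can detect a failed
request. A hedged caller sketch follows; example_set_cluster() is
illustrative only, not part of this patch or any in-tree driver. Note
that a zero return only means the per-CPU switcher thread was found and
woken: per the updated kerneldoc, the function may return before the
switch has actually occurred.

	#include <linux/printk.h>
	#include <asm/bL_switcher.h>

	/* Hypothetical caller: forward the request and propagate errors
	 * (-EINVAL for an out-of-range CPU, -ESRCH or the kthread
	 * creation error if the switcher thread is missing).
	 */
	static int example_set_cluster(unsigned int cpu, unsigned int cluster)
	{
		int ret = bL_switch_request(cpu, cluster);

		if (ret)
			pr_err("cluster switch request for CPU %u failed: %d\n",
			       cpu, ret);
		return ret;
	}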