Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 54 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee48658805c..26b8839fba34 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/syscalls.h>
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
@@ -36,6 +37,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
+#include <asm/uaccess.h>
+
 /*
  * The per-CPU workqueue (if single thread, we always use the first
  * possible cpu).
@@ -270,13 +273,14 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Especially no such guarantee on PREEMPT_RT.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-	int ret;
+	int ret = 0, cpu = raw_smp_processor_id();
 
-	ret = queue_work_on(get_cpu(), wq, work);
-	put_cpu();
+	ret = queue_work_on(cpu, wq, work);
 
 	return ret;
 }
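
Note (not part of the patch): the hunk above drops the get_cpu()/put_cpu() pair, which would disable preemption for the whole queueing path, and instead samples a plain CPU hint with raw_smp_processor_id(). A minimal sketch of the resulting pattern, assuming only the existing queue_work_on() and raw_smp_processor_id() interfaces; the helper name is hypothetical:

#include <linux/smp.h>
#include <linux/workqueue.h>

/*
 * Hypothetical helper (illustration only, not added by this patch):
 * queue @work on whichever CPU we happen to be running on, without
 * disabling preemption.  If the task migrates right after the CPU id
 * is sampled, the work is simply queued on the previous CPU, which
 * queue_work() already tolerates -- per the comment above there is no
 * guarantee about the executing CPU, especially on PREEMPT_RT.
 */
static int queue_work_local_hint(struct workqueue_struct *wq,
				 struct work_struct *work)
{
	return queue_work_on(raw_smp_processor_id(), wq, work);
}
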
@@ -313,7 +317,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
 	struct workqueue_struct *wq = cwq->wq;
 
-	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
+	__queue_work(wq_per_cpu(wq, raw_smp_processor_id()), &dwork->work);
 }
 
 /**
@@ -774,9 +778,9 @@ void flush_delayed_work(struct delayed_work *dwork)
 {
 	if (del_timer_sync(&dwork->timer)) {
 		struct cpu_workqueue_struct *cwq;
-		cwq = wq_per_cpu(keventd_wq, get_cpu());
+		int cpu = raw_smp_processor_id();
+		cwq = wq_per_cpu(keventd_wq, cpu);
 		__queue_work(cwq, &dwork->work);
-		put_cpu();
 	}
 	flush_work(&dwork->work);
 }
@@ -1044,6 +1048,49 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	cwq->thread = NULL;
 }
 
+void set_workqueue_thread_prio(struct workqueue_struct *wq, int cpu,
+			       int policy, int rt_priority, int nice)
+{
+	struct sched_param param = { .sched_priority = rt_priority };
+	struct cpu_workqueue_struct *cwq;
+	mm_segment_t oldfs = get_fs();
+	struct task_struct *p;
+	unsigned long flags;
+	int ret;
+
+	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+	spin_lock_irqsave(&cwq->lock, flags);
+	p = cwq->thread;
+	spin_unlock_irqrestore(&cwq->lock, flags);
+
+	set_user_nice(p, nice);
+
+	set_fs(KERNEL_DS);
+	ret = sys_sched_setscheduler(p->pid, policy, &param);
+	set_fs(oldfs);
+
+	WARN_ON(ret);
+}
+
+void set_workqueue_prio(struct workqueue_struct *wq, int policy,
+			int rt_priority, int nice)
+{
+	int cpu;
+
+	/* We don't need the distraction of CPUs appearing and vanishing. */
+	get_online_cpus();
+	spin_lock(&workqueue_lock);
+	if (is_wq_single_threaded(wq))
+		set_workqueue_thread_prio(wq, 0, policy, rt_priority, nice);
+	else {
+		for_each_online_cpu(cpu)
+			set_workqueue_thread_prio(wq, cpu, policy,
+						  rt_priority, nice);
+	}
+	spin_unlock(&workqueue_lock);
+	put_online_cpus();
+}
+
 /**
  * destroy_workqueue - safely terminate a workqueue
  * @wq: target workqueue
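
Aside (not part of the patch): set_workqueue_thread_prio() calls sys_sched_setscheduler() under set_fs(KERNEL_DS) so the syscall's user-pointer check accepts the on-stack sched_param. A hedged sketch of the same effect via the in-kernel sched_setscheduler() interface, which takes a kernel pointer directly; the helper name is hypothetical, and whether the patch avoids this interface deliberately is not stated in the hunk:

#include <linux/kernel.h>
#include <linux/sched.h>

/*
 * Illustration only: boost a known kthread to SCHED_FIFO without the
 * set_fs()/syscall round trip.  @p and @rt_priority play the same
 * roles as in set_workqueue_thread_prio() above.
 */
static void boost_worker_thread(struct task_struct *p, int rt_priority)
{
	struct sched_param param = { .sched_priority = rt_priority };

	WARN_ON(sched_setscheduler(p, SCHED_FIFO, &param));
}
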
@@ -1176,4 +1223,5 @@ void __init init_workqueues(void)
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
+	set_workqueue_prio(keventd_wq, SCHED_FIFO, 1, -20);
 }
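
The new set_workqueue_prio() helper is wired up for keventd_wq at the end of init_workqueues() above. A hedged sketch of how another user could call it, assuming a declaration is made visible somewhere reachable (e.g. linux/workqueue.h -- no such declaration is part of these hunks); the queue name and init function are hypothetical:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_rt_wq;	/* hypothetical example queue */

static int __init my_rt_wq_init(void)
{
	my_rt_wq = create_workqueue("my_rt_events");
	if (!my_rt_wq)
		return -ENOMEM;

	/* Mirror the keventd setup above: SCHED_FIFO priority 1, nice -20. */
	set_workqueue_prio(my_rt_wq, SCHED_FIFO, 1, -20);
	return 0;
}
module_init(my_rt_wq_init);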