author	Julia Cartwright <julia@ni.com>	2019-02-12 11:25:53 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2019-02-28 05:18:38 -0500
commit	fe99a4f4d6022ec92f9b52a5528cb9b77513e7d1 (patch)
tree	87e1bdb98c37e47aab67bf52170fe4a61ca3bde1
parent	c89d92eddfad11e912fb506f85e1796064a9f9d2 (diff)
kthread: Convert worker lock to raw spinlock
In order to enable the queuing of kthread work items from hardirq
context even when PREEMPT_RT_FULL is enabled, convert the worker
spin_lock to a raw_spin_lock.

This is only acceptable to do because the work performed under the lock
is well-bounded and minimal.

Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reported-by: Tim Sander <tim@krieglstein.org>
Signed-off-by: Julia Cartwright <julia@ni.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: https://lkml.kernel.org/r/20190212162554.19779-1-bigeasy@linutronix.de
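[Editor's note: for context, on PREEMPT_RT spinlock_t is substituted with a sleeping lock and therefore must not be taken in hardirq context, whereas raw_spinlock_t always remains a true spinning lock. The sketch below, using only the kthread worker API touched by this patch, illustrates the pattern the conversion enables: queuing kthread work directly from a hardirq handler. The driver names (my_device, my_irq_handler, my_work_fn) are hypothetical.]

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

/* Hypothetical per-device state: a worker thread plus one work item. */
struct my_device {
	struct kthread_worker	*worker;
	struct kthread_work	work;
};

/* Runs in the worker kthread, where sleeping is allowed. */
static void my_work_fn(struct kthread_work *work)
{
	struct my_device *dev = container_of(work, struct my_device, work);

	/* ... heavier processing deferred from the interrupt ... */
}

/*
 * Hardirq handler: with worker->lock now a raw_spinlock_t, this call
 * is safe here even on PREEMPT_RT, since it never sleeps.
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_device *dev = data;

	kthread_queue_work(dev->worker, &dev->work);
	return IRQ_HANDLED;
}

static int my_device_init(struct my_device *dev, int irq)
{
	dev->worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(dev->worker))
		return PTR_ERR(dev->worker);

	kthread_init_work(&dev->work, my_work_fn);
	return request_irq(irq, my_irq_handler, 0, "my-device", dev);
}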
-rw-r--r--	include/linux/kthread.h	4
-rw-r--r--	kernel/kthread.c	42
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..6b8c064f0cbc 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,7 +85,7 @@ enum {
 
 struct kthread_worker {
 	unsigned int		flags;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	struct list_head	work_list;
 	struct list_head	delayed_work_list;
 	struct task_struct	*task;
@@ -106,7 +106,7 @@ struct kthread_delayed_work {
 };
 
 #define KTHREAD_WORKER_INIT(worker)	{				\
-	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock),		\
 	.work_list = LIST_HEAD_INIT((worker).work_list),		\
 	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
 	}
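[Editor's note: statically defined workers are unaffected by this initializer change. A minimal sketch, assuming a hypothetical demo module, using the existing DEFINE_KTHREAD_WORKER() wrapper, which expands to KTHREAD_WORKER_INIT():]

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>

/* Statically defined worker; its embedded lock is now a raw_spinlock_t,
 * initialized via __RAW_SPIN_LOCK_UNLOCKED() in KTHREAD_WORKER_INIT(). */
static DEFINE_KTHREAD_WORKER(demo_worker);

static int __init demo_init(void)
{
	struct task_struct *task;

	/* Attach a thread that processes demo_worker's work list. */
	task = kthread_run(kthread_worker_fn, &demo_worker, "demo-worker");
	return PTR_ERR_OR_ZERO(task);
}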
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..5641b55783a6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
 				struct lock_class_key *key)
 {
 	memset(worker, 0, sizeof(struct kthread_worker));
-	spin_lock_init(&worker->lock);
+	raw_spin_lock_init(&worker->lock);
 	lockdep_set_class_and_name(&worker->lock, key, name);
 	INIT_LIST_HEAD(&worker->work_list);
 	INIT_LIST_HEAD(&worker->delayed_work_list);
@@ -641,21 +641,21 @@ repeat:
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
-		spin_lock_irq(&worker->lock);
+		raw_spin_lock_irq(&worker->lock);
 		worker->task = NULL;
-		spin_unlock_irq(&worker->lock);
+		raw_spin_unlock_irq(&worker->lock);
 		return 0;
 	}
 
 	work = NULL;
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	if (!list_empty(&worker->work_list)) {
 		work = list_first_entry(&worker->work_list,
 					struct kthread_work, node);
 		list_del_init(&work->node);
 	}
 	worker->current_work = work;
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
 		__set_current_state(TASK_RUNNING);
@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
 	bool ret = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	if (!queuing_blocked(worker, work)) {
 		kthread_insert_work(worker, work, &worker->work_list);
 		ret = true;
 	}
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	if (WARN_ON_ONCE(!worker))
 		return;
 
-	spin_lock(&worker->lock);
+	raw_spin_lock(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	list_del_init(&work->node);
 	kthread_insert_work(worker, work, &worker->work_list);
 
-	spin_unlock(&worker->lock);
+	raw_spin_unlock(&worker->lock);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	bool ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	if (!queuing_blocked(worker, work)) {
 		__kthread_queue_delayed_work(worker, dwork, delay);
 		ret = true;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_work *work)
 	if (!worker)
 		return;
 
-	spin_lock_irq(&worker->lock);
+	raw_spin_lock_irq(&worker->lock);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_work *work)
 	else
 		noop = true;
 
-	spin_unlock_irq(&worker->lock);
+	raw_spin_unlock_irq(&worker->lock);
 
 	if (!noop)
 		wait_for_completion(&fwork.done);
@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 		 * any queuing is blocked by setting the canceling counter.
 		 */
 		work->canceling++;
-		spin_unlock_irqrestore(&worker->lock, *flags);
+		raw_spin_unlock_irqrestore(&worker->lock, *flags);
 		del_timer_sync(&dwork->timer);
-		spin_lock_irqsave(&worker->lock, *flags);
+		raw_spin_lock_irqsave(&worker->lock, *flags);
 		work->canceling--;
 	}
 
@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	unsigned long flags;
 	int ret = false;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 
 	/* Do not bother with canceling when never queued. */
 	if (!work->worker)
@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	if (!worker)
 		goto out;
 
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	 * In the meantime, block any queuing by setting the canceling counter.
 	 */
 	work->canceling++;
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 	kthread_flush_work(work);
-	spin_lock_irqsave(&worker->lock, flags);
+	raw_spin_lock_irqsave(&worker->lock, flags);
 	work->canceling--;
 
 out_fast:
-	spin_unlock_irqrestore(&worker->lock, flags);
+	raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
 	return ret;
 }