about summary refs log tree commit diff stats
path: root/kernel/irq
diff options
context:
space:
mode:
author    Ivo Sieben <meltedpianoman@gmail.com>  2013-06-03 06:12:02 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2013-06-11 10:18:50 -0400
commitee23871389d51e07380d23887333622fbe7d3dd9 (patch)
tree729b38096a39a2e9ff318149c40a6ca53199707e /kernel/irq
parent9dbd90f17e4f380593ec5194c2a4d5e52c5f72d1 (diff)
genirq: Set irq thread to RT priority on creation
When a threaded irq handler is installed, the irq thread is initially created
with normal scheduling priority. Only after the irq thread is woken up does it
set its own priority to SCHED_FIFO with MAX_USER_RT_PRIO/2. This means that
interrupts which occur directly after the irq handler is installed are handled
at normal scheduling priority instead of the realtime priority one would
expect. Fix this by setting the RT priority at creation time of the
irq_thread, in __setup_irq().

Signed-off-by: Ivo Sieben <meltedpianoman@gmail.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1370254322-17240-1-git-send-email-meltedpianoman@gmail.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/manage.c  |  11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fa17855ca65a..e16caa81f887 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -840,9 +840,6 @@ static void irq_thread_dtor(struct callback_head *unused)
 static int irq_thread(void *data)
 {
 	struct callback_head on_exit_work;
-	static const struct sched_param param = {
-		.sched_priority = MAX_USER_RT_PRIO/2,
-	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
@@ -854,8 +851,6 @@ static int irq_thread(void *data)
 	else
 		handler_fn = irq_thread_fn;
 
-	sched_setscheduler(current, SCHED_FIFO, &param);
-
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, false);
 
@@ -950,6 +945,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	if (new->thread_fn && !nested) {
 		struct task_struct *t;
+		static const struct sched_param param = {
+			.sched_priority = MAX_USER_RT_PRIO/2,
+		};
 
 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 				   new->name);
@@ -957,6 +955,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			ret = PTR_ERR(t);
 			goto out_mput;
 		}
+
+		sched_setscheduler(t, SCHED_FIFO, &param);
+
 		/*
 		 * We keep the reference to the task struct even if
 		 * the thread dies to avoid that the interrupt code