path: root/kernel/rcutree_plugin.h
author		Mike Galbraith <efault@gmx.de>	2011-08-19 14:39:11 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-29 00:38:47 -0400
commit		5b61b0baa9e80289c53413e573befc5790a04ac7
tree		5f3c4b002089b6d6e92fafaba42ab4a42ef1f1f9	/kernel/rcutree_plugin.h
parent		ab8f11e5f6655861ad4758a7da76b2fc0e0dcc98
rcu: Wire up RCU_BOOST_PRIO for rcutree
RCU boost threads start life at RCU_BOOST_PRIO, while others remain at RCU_KTHREAD_PRIO. While here, change thread names to match other kthreads, and adjust rcu_yield() to not override the priority set by the user. This last change sets the stage for runtime changes to priority in the -rt tree.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
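To make the rcu_yield() part of the change concrete, here is a minimal userspace sketch of the same idea: save the caller's real-time priority, drop to the normal scheduling class to yield, then restore whatever the caller had rather than a hard-coded constant. This is an illustrative analogue only, not the kernel code from the patch; the helper name yield_and_restore() and the example priority of 10 are hypothetical.

#include <sched.h>
#include <stdio.h>

/*
 * Hypothetical userspace analogue of the rcu_yield() change: remember
 * the caller's scheduling policy and priority, yield at SCHED_OTHER,
 * then restore the saved settings instead of a fixed value.
 */
static int yield_and_restore(void)
{
	struct sched_param sp;
	struct sched_param normal = { .sched_priority = 0 };
	int policy = sched_getscheduler(0);

	/* Remember the priority the caller (or an admin) chose. */
	if (policy < 0 || sched_getparam(0, &sp) != 0)
		return -1;

	/* Temporarily drop to the normal class and yield the CPU. */
	if (sched_setscheduler(0, SCHED_OTHER, &normal) != 0)
		return -1;
	sched_yield();

	/* Restore the saved policy and priority, not a constant. */
	return sched_setscheduler(0, policy, &sp);
}

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* Needs appropriate privileges (e.g. root or CAP_SYS_NICE). */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		perror("sched_setscheduler");
		return 1;
	}
	if (yield_and_restore() != 0) {
		perror("yield_and_restore");
		return 1;
	}
	printf("priority restored to %d\n", sp.sched_priority);
	return 0;
}

The kernel-side change in the diff below does the equivalent with current->rt_priority and sched_setscheduler_nocheck(), so a priority set at runtime survives a yield.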
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 28422767d854..b4cbe5bf2326 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -27,6 +27,14 @@
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
+
 /*
  * Check the RCU kernel configuration parameters and print informative
  * messages about anything out of the ordinary. If you like #ifdef, you
@@ -1364,13 +1372,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
-			   "rcub%d", rnp_index);
+			   "rcub/%d", rnp_index);
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sp.sched_priority = RCU_BOOST_PRIO;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 	return 0;
@@ -1465,6 +1473,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 {
 	struct sched_param sp;
 	struct timer_list yield_timer;
+	int prio = current->rt_priority;
 
 	setup_timer_on_stack(&yield_timer, f, arg);
 	mod_timer(&yield_timer, jiffies + 2);
@@ -1472,7 +1481,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
 	set_user_nice(current, 19);
 	schedule();
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	set_user_nice(current, 0);
+	sp.sched_priority = prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 	del_timer(&yield_timer);
 }
@@ -1591,7 +1601,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
-				   "rcuc%d", cpu);
+				   "rcuc/%d", cpu);
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	if (cpu_online(cpu))
@@ -1700,7 +1710,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 		return 0;
 	if (rnp->node_kthread_task == NULL) {
 		t = kthread_create(rcu_node_kthread, (void *)rnp,
-				   "rcun%d", rnp_index);
+				   "rcun/%d", rnp_index);
 		if (IS_ERR(t))
 			return PTR_ERR(t);
 		raw_spin_lock_irqsave(&rnp->lock, flags);