Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  597
1 file changed, 231 insertions(+), 366 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 7f3244c0df01..f92115488187 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,8 @@
  */
 
 #include <linux/delay.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -118,7 +120,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_preempt_state, 0);
+	force_quiescent_state(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
@@ -136,8 +138,6 @@ static void rcu_preempt_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
-	rdp->passed_quiesce_gpnum = rdp->gpnum;
-	barrier();
 	if (rdp->passed_quiesce == 0)
 		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesce = 1;
@@ -422,9 +422,11 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 	unsigned long flags;
 	struct task_struct *t;
 
-	if (!rcu_preempt_blocked_readers_cgp(rnp))
-		return;
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		return;
+	}
 	t = list_entry(rnp->gp_tasks,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
@@ -584,17 +586,23 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
 	}
 
+	rnp->gp_tasks = NULL;
+	rnp->exp_tasks = NULL;
 #ifdef CONFIG_RCU_BOOST
-	/* In case root is being boosted and leaf is not. */
+	rnp->boost_tasks = NULL;
+	/*
+	 * In case root is being boosted and leaf was not. Make sure
+	 * that we boost the tasks blocking the current grace period
+	 * in this case.
+	 */
 	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
 	if (rnp_root->boost_tasks != NULL &&
-	    rnp_root->boost_tasks != rnp_root->gp_tasks)
+	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
+	    rnp_root->boost_tasks != rnp_root->exp_tasks)
 		rnp_root->boost_tasks = rnp_root->gp_tasks;
 	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
-	rnp->gp_tasks = NULL;
-	rnp->exp_tasks = NULL;
 	return retval;
 }
 
@@ -676,7 +684,7 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static long sync_rcu_preempt_exp_count;
+static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
@@ -791,7 +799,7 @@ void synchronize_rcu_expedited(void)
 	unsigned long flags;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_preempt_state;
-	long snap;
+	unsigned long snap;
 	int trycount = 0;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
@@ -799,33 +807,47 @@ void synchronize_rcu_expedited(void)
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
 	/*
+	 * Block CPU-hotplug operations. This means that any CPU-hotplug
+	 * operation that finds an rcu_node structure with tasks in the
+	 * process of being boosted will know that all tasks blocking
+	 * this expedited grace period will already be in the process of
+	 * being boosted. This simplifies the process of moving tasks
+	 * from leaf to root rcu_node structures.
+	 */
+	get_online_cpus();
+
+	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
 	 * lock-acquisition failures. Of course, if someone does the
 	 * expedited grace period for us, just leave.
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
+		if (ULONG_CMP_LT(snap,
+				 ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+			put_online_cpus();
+			goto mb_ret; /* Others did our work for us. */
+		}
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
 		} else {
+			put_online_cpus();
 			synchronize_rcu();
 			return;
 		}
-		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
-			goto mb_ret; /* Others did our work for us. */
 	}
-	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+		put_online_cpus();
 		goto unlock_mb_ret; /* Others did our work for us. */
+	}
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
 
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
-
 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
-		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		raw_spin_lock_irqsave(&rnp->lock, flags);
 		rnp->expmask = rnp->qsmaskinit;
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
 	/* Snapshot current state of ->blkd_tasks lists. */
@@ -834,7 +856,7 @@ void synchronize_rcu_expedited(void)
 	if (NUM_RCU_NODES > 1)
 		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
 
-	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+	put_online_cpus();
 
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
@@ -1069,6 +1091,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+	/*
+	 * If the thread is yielding, only wake it when this
+	 * is invoked from idle
+	 */
+	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+		wake_up_process(t);
+}
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1141,17 +1173,6 @@ static int rcu_boost(struct rcu_node *rnp)
 }
 
 /*
- * Timer handler to initiate waking up of boost kthreads that
- * have yielded the CPU due to excessive numbers of tasks to
- * boost. We wake up the per-rcu_node kthread, which in turn
- * will wake up the booster kthread.
- */
-static void rcu_boost_kthread_timer(unsigned long arg)
-{
-	invoke_rcu_node_kthread((struct rcu_node *)arg);
-}
-
-/*
  * Priority-boosting kthread. One per leaf rcu_node and one for the
  * root rcu_node.
  */
@@ -1174,8 +1195,9 @@ static int rcu_boost_kthread(void *arg)
 		else
 			spincnt = 0;
 		if (spincnt > 10) {
+			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
 			trace_rcu_utilization("End boost kthread@rcu_yield");
-			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+			schedule_timeout_interruptible(2);
 			trace_rcu_utilization("Start boost kthread@rcu_yield");
 			spincnt = 0;
 		}
@@ -1191,9 +1213,9 @@ static int rcu_boost_kthread(void *arg)
  * kthread to start boosting them. If there is an expedited grace
  * period in progress, it is always time to boost.
  *
- * The caller must hold rnp->lock, which this function releases,
- * but irqs remain disabled. The ->boost_kthread_task is immortal,
- * so we don't need to worry about it going away.
+ * The caller must hold rnp->lock, which this function releases.
+ * The ->boost_kthread_task is immortal, so we don't need to worry
+ * about it going away.
  */
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
@@ -1213,8 +1235,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 		rnp->boost_tasks = rnp->gp_tasks;
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
-		if (t != NULL)
-			wake_up_process(t);
+		if (t)
+			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1231,8 +1253,10 @@ static void invoke_rcu_callbacks_kthread(void)
 	local_irq_save(flags);
 	__this_cpu_write(rcu_cpu_has_work, 1);
 	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-	    current != __this_cpu_read(rcu_cpu_kthread_task))
-		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
+		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+			      __this_cpu_read(rcu_cpu_kthread_status));
+	}
 	local_irq_restore(flags);
 }
 
@@ -1245,21 +1269,6 @@ static bool rcu_is_callbacks_kthread(void)
 	return __get_cpu_var(rcu_cpu_kthread_task) == current;
 }
 
-/*
- * Set the affinity of the boost kthread. The CPU-hotplug locks are
- * held, so no one should be messing with the existence of the boost
- * kthread.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-					  cpumask_var_t cm)
-{
-	struct task_struct *t;
-
-	t = rnp->boost_kthread_task;
-	if (t != NULL)
-		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
-}
-
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
 
 /*
@@ -1276,15 +1285,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * Returns zero if all is well, a negated errno otherwise.
  */
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp,
-						 int rnp_index)
+						 struct rcu_node *rnp)
 {
+	int rnp_index = rnp - &rsp->node[0];
 	unsigned long flags;
 	struct sched_param sp;
 	struct task_struct *t;
 
 	if (&rcu_preempt_state != rsp)
 		return 0;
+
+	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+		return 0;
+
 	rsp->boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
@@ -1301,25 +1314,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Stop the RCU's per-CPU kthread when its CPU goes offline,.
- */
-static void rcu_stop_cpu_kthread(int cpu)
-{
-	struct task_struct *t;
-
-	/* Stop the CPU's kthread. */
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t != NULL) {
-		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-		kthread_stop(t);
-	}
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1327,112 +1321,22 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-	struct task_struct *t;
-
-	t = rnp->node_kthread_task;
-	if (t != NULL)
-		wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument. The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-	int policy;
 	struct sched_param sp;
-	struct task_struct *t;
 
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t == NULL)
-		return;
-	if (to_rt) {
-		policy = SCHED_FIFO;
-		sp.sched_priority = RCU_KTHREAD_PRIO;
-	} else {
-		policy = SCHED_NORMAL;
-		sp.sched_priority = 0;
-	}
-	sched_setscheduler_nocheck(t, policy, &sp);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
+static void rcu_cpu_kthread_park(unsigned int cpu)
 {
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-	struct rcu_node *rnp = rdp->mynode;
-
-	atomic_or(rdp->grpmask, &rnp->wakemask);
-	invoke_rcu_node_kthread(rnp);
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
 }
 
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted. Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
 {
-	struct sched_param sp;
-	struct timer_list yield_timer;
-	int prio = current->rt_priority;
-
-	setup_timer_on_stack(&yield_timer, f, arg);
-	mod_timer(&yield_timer, jiffies + 2);
-	sp.sched_priority = 0;
-	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-	set_user_nice(current, 19);
-	schedule();
-	set_user_nice(current, 0);
-	sp.sched_priority = prio;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-	del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline. We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh. This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
-	while (cpu_is_offline(cpu) ||
-	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-	       smp_processor_id() != cpu) {
-		if (kthread_should_stop())
-			return 1;
-		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-		local_bh_enable();
-		schedule_timeout_uninterruptible(1);
-		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
-		local_bh_disable();
-	}
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	return 0;
+	return __get_cpu_var(rcu_cpu_has_work);
 }
 
 /*
@@ -1440,138 +1344,35 @@ static int rcu_cpu_kthread_should_stop(int cpu)
  * RCU softirq used in flavors and configurations of RCU that do not
  * support RCU priority boosting.
  */
-static int rcu_cpu_kthread(void *arg)
+static void rcu_cpu_kthread(unsigned int cpu)
 {
-	int cpu = (int)(long)arg;
-	unsigned long flags;
-	int spincnt = 0;
-	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	char work;
-	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	int spincnt;
 
-	trace_rcu_utilization("Start CPU kthread@init");
-	for (;;) {
-		*statusp = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End CPU kthread@rcu_wait");
-		rcu_wait(*workp != 0 || kthread_should_stop());
+	for (spincnt = 0; spincnt < 10; spincnt++) {
 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
 		local_bh_disable();
-		if (rcu_cpu_kthread_should_stop(cpu)) {
-			local_bh_enable();
-			break;
-		}
 		*statusp = RCU_KTHREAD_RUNNING;
-		per_cpu(rcu_cpu_kthread_loops, cpu)++;
-		local_irq_save(flags);
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
 		work = *workp;
 		*workp = 0;
-		local_irq_restore(flags);
+		local_irq_enable();
 		if (work)
 			rcu_kthread_do_work();
 		local_bh_enable();
-		if (*workp != 0)
-			spincnt++;
-		else
-			spincnt = 0;
-		if (spincnt > 10) {
-			*statusp = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End CPU kthread@rcu_yield");
-			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-			trace_rcu_utilization("Start CPU kthread@rcu_yield");
-			spincnt = 0;
-		}
-	}
-	*statusp = RCU_KTHREAD_STOPPED;
-	trace_rcu_utilization("End CPU kthread@term");
-	return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task. There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online. We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods. So we bind the per-CPU kthread to its CPU only if the CPU
- * is online. If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-		return 0;
-	t = kthread_create_on_node(rcu_cpu_kthread,
-				   (void *)(long)cpu,
-				   cpu_to_node(cpu),
-				   "rcuc/%d", cpu);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-	if (cpu_online(cpu))
-		kthread_bind(t, cpu);
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-	return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed. We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-	struct sched_param sp;
-	struct task_struct *t;
-
-	for (;;) {
-		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		rcu_wait(atomic_read(&rnp->wakemask) != 0);
-		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		mask = atomic_xchg(&rnp->wakemask, 0);
-		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-			if ((mask & 0x1) == 0)
-				continue;
-			preempt_disable();
-			t = per_cpu(rcu_cpu_kthread_task, cpu);
-			if (!cpu_online(cpu) || t == NULL) {
-				preempt_enable();
-				continue;
-			}
-			per_cpu(rcu_cpu_has_work, cpu) = 1;
-			sp.sched_priority = RCU_KTHREAD_PRIO;
-			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-			preempt_enable();
+		if (*workp == 0) {
+			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
 		}
 	}
-	/* NOTREACHED */
-	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-	return 0;
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	*statusp = RCU_KTHREAD_WAITING;
 }
 
 /*
@@ -1583,17 +1384,17 @@ static int rcu_node_kthread(void *arg)
  * no outgoing CPU. If there are no CPUs left in the affinity set,
  * this function allows the kthread to execute on any CPU.
  */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
+	struct task_struct *t = rnp->boost_kthread_task;
+	unsigned long mask = rnp->qsmaskinit;
 	cpumask_var_t cm;
 	int cpu;
-	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL)
+	if (!t)
 		return;
-	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
-	cpumask_clear(cm);
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
 		if ((mask & 0x1) && cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
@@ -1603,62 +1404,36 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 			cpumask_clear_cpu(cpu, cm);
 		WARN_ON_ONCE(cpumask_weight(cm) == 0);
 	}
-	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-	rcu_boost_kthread_setaffinity(rnp, cm);
+	set_cpus_allowed_ptr(t, cm);
 	free_cpumask_var(cm);
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held. So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-						struct rcu_node *rnp)
-{
-	unsigned long flags;
-	int rnp_index = rnp - &rsp->node[0];
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    rnp->qsmaskinit == 0)
-		return 0;
-	if (rnp->node_kthread_task == NULL) {
-		t = kthread_create(rcu_node_kthread, (void *)rnp,
-				   "rcun/%d", rnp_index);
-		if (IS_ERR(t))
-			return PTR_ERR(t);
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = t;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		sp.sched_priority = 99;
-		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-	}
-	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
-}
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
 
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
 static int __init rcu_spawn_kthreads(void)
 {
-	int cpu;
 	struct rcu_node *rnp;
+	int cpu;
 
 	rcu_scheduler_fully_active = 1;
-	for_each_possible_cpu(cpu) {
+	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu))
-			(void)rcu_spawn_one_cpu_kthread(cpu);
-	}
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rnp = rcu_get_root(rcu_state);
-	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	if (NUM_RCU_NODES > 1) {
 		rcu_for_each_leaf_node(rcu_state, rnp)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	}
 	return 0;
 }
@@ -1670,11 +1445,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_scheduler_fully_active) {
-		(void)rcu_spawn_one_cpu_kthread(cpu);
-		if (rnp->node_kthread_task == NULL)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-	}
+	if (rcu_scheduler_fully_active)
+		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1698,19 +1470,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_cpu_kthread(int cpu)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-}
-
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
@@ -1997,6 +1757,26 @@ static void rcu_prepare_for_idle(int cpu)
 	if (!tne)
 		return;
 
+	/* Adaptive-tick mode, where usermode execution is idle to RCU. */
+	if (!is_idle_task(current)) {
+		rdtp->dyntick_holdoff = jiffies - 1;
+		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+			trace_rcu_prep_idle("User dyntick with callbacks");
+			rdtp->idle_gp_timer_expires =
+				round_up(jiffies + RCU_IDLE_GP_DELAY,
+					 RCU_IDLE_GP_DELAY);
+		} else if (rcu_cpu_has_callbacks(cpu)) {
+			rdtp->idle_gp_timer_expires =
+				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
+			trace_rcu_prep_idle("User dyntick with lazy callbacks");
+		} else {
+			return;
+		}
+		tp = &rdtp->idle_gp_timer;
+		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
+		return;
+	}
+
 	/*
 	 * If this is an idle re-entry, for example, due to use of
 	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
@@ -2075,16 +1855,16 @@ static void rcu_prepare_for_idle(int cpu)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
 		rcu_preempt_qs(cpu);
-		force_quiescent_state(&rcu_preempt_state, 0);
+		force_quiescent_state(&rcu_preempt_state);
 	}
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
 		rcu_sched_qs(cpu);
-		force_quiescent_state(&rcu_sched_state, 0);
+		force_quiescent_state(&rcu_sched_state);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
 		rcu_bh_qs(cpu);
-		force_quiescent_state(&rcu_bh_state, 0);
+		force_quiescent_state(&rcu_bh_state);
 	}
 
 	/*
@@ -2112,6 +1892,88 @@ static void rcu_idle_count_callbacks_posted(void)
 	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
+/*
+ * Data for flushing lazy RCU callbacks at OOM time.
+ */
+static atomic_t oom_callback_count;
+static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
+
+/*
+ * RCU OOM callback -- decrement the outstanding count and deliver the
+ * wake-up if we are the last one.
+ */
+static void rcu_oom_callback(struct rcu_head *rhp)
+{
+	if (atomic_dec_and_test(&oom_callback_count))
+		wake_up(&oom_callback_wq);
+}
+
+/*
+ * Post an rcu_oom_notify callback on the current CPU if it has at
+ * least one lazy callback. This will unnecessarily post callbacks
+ * to CPUs that already have a non-lazy callback at the end of their
+ * callback list, but this is an infrequent operation, so accept some
+ * extra overhead to keep things simple.
+ */
+static void rcu_oom_notify_cpu(void *unused)
+{
+	struct rcu_state *rsp;
+	struct rcu_data *rdp;
+
+	for_each_rcu_flavor(rsp) {
+		rdp = __this_cpu_ptr(rsp->rda);
+		if (rdp->qlen_lazy != 0) {
+			atomic_inc(&oom_callback_count);
+			rsp->call(&rdp->oom_head, rcu_oom_callback);
+		}
+	}
+}
+
+/*
+ * If low on memory, ensure that each CPU has a non-lazy callback.
+ * This will wake up CPUs that have only lazy callbacks, in turn
+ * ensuring that they free up the corresponding memory in a timely manner.
+ * Because an uncertain amount of memory will be freed in some uncertain
+ * timeframe, we do not claim to have freed anything.
+ */
+static int rcu_oom_notify(struct notifier_block *self,
+			  unsigned long notused, void *nfreed)
+{
+	int cpu;
+
+	/* Wait for callbacks from earlier instance to complete. */
+	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+
+	/*
+	 * Prevent premature wakeup: ensure that all increments happen
+	 * before there is a chance of the counter reaching zero.
+	 */
+	atomic_set(&oom_callback_count, 1);
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
+		cond_resched();
+	}
+	put_online_cpus();
+
+	/* Unconditionally decrement: no need to wake ourselves up. */
+	atomic_dec(&oom_callback_count);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block rcu_oom_nb = {
+	.notifier_call = rcu_oom_notify
+};
+
+static int __init rcu_register_oom_notifier(void)
+{
+	register_oom_notifier(&rcu_oom_nb);
+	return 0;
+}
+early_initcall(rcu_register_oom_notifier);
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
@@ -2122,11 +1984,15 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 	struct timer_list *tltp = &rdtp->idle_gp_timer;
+	char c;
 
-	sprintf(cp, "drain=%d %c timer=%lu",
-		rdtp->dyntick_drain,
-		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
-		timer_pending(tltp) ? tltp->expires - jiffies : -1);
+	c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
+	if (timer_pending(tltp))
+		sprintf(cp, "drain=%d %c timer=%lu",
+			rdtp->dyntick_drain, c, tltp->expires - jiffies);
+	else
+		sprintf(cp, "drain=%d %c timer not pending",
+			rdtp->dyntick_drain, c);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -2194,11 +2060,10 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)
 /* Increment ->ticks_this_gp for all flavors of RCU. */
 static void increment_cpu_stall_ticks(void)
 {
-	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
-	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
-#ifdef CONFIG_TREE_PREEMPT_RCU
-	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp)
+		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
 }
 
 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */