author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-07-16 06:42:38 -0400
committer	Thomas Gleixner <tglx@linutronix.de>		2012-08-13 11:01:08 -0400
commit		62ab7072476ae1600e877cc62b43758e485f4f1e (patch)
tree		c1df0992432cf5bae0f9122d606e73628368fb8e /kernel/rcutree_plugin.h
parent		bcd951cf10f24e341defcd002c15a1f4eea13ddb (diff)
rcu: Use smp_hotplug_thread facility for RCUs per-CPU kthread
Bring RCU into the new-age CPU-hotplug fold by modifying RCU's per-CPU
kthread code to use the new smp_hotplug_thread facility.
[ tglx: Adapted it to use callbacks and to the simplified rcu yield ]
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20120716103948.673354828@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
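For readers who have not met the smpboot interface before, here is a minimal sketch of the registration pattern this patch adopts. The example_* names are hypothetical and only illustrate the shape of the API; the actual RCU callbacks and the rcu_cpu_thread_spec descriptor appear in the diff below.

/*
 * Minimal sketch of the smp_hotplug_thread registration pattern.
 * The example_* names are hypothetical.  The smpboot core spawns one
 * "example/%u" kthread per CPU, parks it while its CPU is offline, and
 * calls thread_fn whenever thread_should_run returns nonzero.
 */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, example_task);
static DEFINE_PER_CPU(int, example_has_work);

static int example_should_run(unsigned int cpu)
{
	return __this_cpu_read(example_has_work);
}

static void example_thread_fn(unsigned int cpu)
{
	/* Consume this CPU's work flag, then do the per-CPU work. */
	__this_cpu_write(example_has_work, 0);
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_task,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "example/%u",
};

static int __init example_init(void)
{
	/* One call creates and hotplug-manages all the per-CPU kthreads. */
	return smpboot_register_percpu_thread(&example_threads);
}
early_initcall(example_init);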
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	203
1 file changed, 40 insertions(+), 163 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0f8b5ec64a7d..c1961aed1213 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/smpboot.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1292,25 +1293,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Stop the RCU's per-CPU kthread when its CPU goes offline,.
- */
-static void rcu_stop_cpu_kthread(int cpu)
-{
-	struct task_struct *t;
-
-	/* Stop the CPU's kthread. */
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t != NULL) {
-		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-		kthread_stop(t);
-	}
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1318,59 +1300,22 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-	int policy;
 	struct sched_param sp;
-	struct task_struct *t;
 
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t == NULL)
-		return;
-	if (to_rt) {
-		policy = SCHED_FIFO;
-		sp.sched_priority = RCU_KTHREAD_PRIO;
-	} else {
-		policy = SCHED_NORMAL;
-		sp.sched_priority = 0;
-	}
-	sched_setscheduler_nocheck(t, policy, &sp);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
+static void rcu_cpu_kthread_park(unsigned int cpu)
 {
-	while (cpu_is_offline(cpu) ||
-	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-	       smp_processor_id() != cpu) {
-		if (kthread_should_stop())
-			return 1;
-		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-		local_bh_enable();
-		schedule_timeout_uninterruptible(1);
-		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
-		local_bh_disable();
-	}
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	return 0;
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+	return __get_cpu_var(rcu_cpu_has_work);
 }
 
 /*
@@ -1378,96 +1323,35 @@ static int rcu_cpu_kthread_should_stop(int cpu)
  * RCU softirq used in flavors and configurations of RCU that do not
  * support RCU priority boosting.
  */
-static int rcu_cpu_kthread(void *arg)
+static void rcu_cpu_kthread(unsigned int cpu)
 {
-	int cpu = (int)(long)arg;
-	unsigned long flags;
-	int spincnt = 0;
-	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	char work;
-	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	int spincnt;
 
-	trace_rcu_utilization("Start CPU kthread@init");
-	for (;;) {
-		*statusp = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End CPU kthread@rcu_wait");
-		rcu_wait(*workp != 0 || kthread_should_stop());
+	for (spincnt = 0; spincnt < 10; spincnt++) {
 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
 		local_bh_disable();
-		if (rcu_cpu_kthread_should_stop(cpu)) {
-			local_bh_enable();
-			break;
-		}
 		*statusp = RCU_KTHREAD_RUNNING;
-		per_cpu(rcu_cpu_kthread_loops, cpu)++;
-		local_irq_save(flags);
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
 		work = *workp;
 		*workp = 0;
-		local_irq_restore(flags);
+		local_irq_enable();
 		if (work)
 			rcu_kthread_do_work();
 		local_bh_enable();
-		if (*workp != 0)
-			spincnt++;
-		else
-			spincnt = 0;
-		if (spincnt > 10) {
-			*statusp = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End CPU kthread@rcu_yield");
-			schedule_timeout_interruptible(2);
-			trace_rcu_utilization("Start CPU kthread@rcu_yield");
-			spincnt = 0;
+		if (*workp == 0) {
+			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
 		}
 	}
-	*statusp = RCU_KTHREAD_STOPPED;
-	trace_rcu_utilization("End CPU kthread@term");
-	return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online.  We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
- * is online.  If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-		return 0;
-	t = kthread_create_on_node(rcu_cpu_kthread,
-				   (void *)(long)cpu,
-				   cpu_to_node(cpu),
-				   "rcuc/%d", cpu);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-	if (cpu_online(cpu))
-		kthread_bind(t, cpu);
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-	return 0;
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	*statusp = RCU_KTHREAD_WAITING;
 }
 
 /*
@@ -1503,6 +1387,15 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	free_cpumask_var(cm);
 }
 
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
+
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1512,11 +1405,9 @@ static int __init rcu_spawn_kthreads(void)
 	int cpu;
 
 	rcu_scheduler_fully_active = 1;
-	for_each_possible_cpu(cpu) {
+	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu))
-			(void)rcu_spawn_one_cpu_kthread(cpu);
-	}
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rnp = rcu_get_root(rcu_state);
 	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	if (NUM_RCU_NODES > 1) {
@@ -1533,10 +1424,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_scheduler_fully_active) {
-		(void)rcu_spawn_one_cpu_kthread(cpu);
+	if (rcu_scheduler_fully_active)
 		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
-	}
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1560,22 +1449,10 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_cpu_kthread(int cpu)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-}
-
 static int __init rcu_scheduler_really_started(void)
 {
 	rcu_scheduler_fully_active = 1;