Diffstat (limited to 'kernel/rcutree.c')
 -rw-r--r--  kernel/rcutree.c | 332
 1 file changed, 244 insertions(+), 88 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d0c5baf1ab18..0da7b88d92d0 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -75,6 +75,8 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
         .gpnum = -300, \
         .completed = -300, \
         .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
+        .orphan_nxttail = &structname##_state.orphan_nxtlist, \
+        .orphan_donetail = &structname##_state.orphan_donelist, \
         .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
         .n_force_qs = 0, \
         .n_force_qs_ngp = 0, \
@@ -145,6 +147,13 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 unsigned long rcutorture_testseq;
 unsigned long rcutorture_vernum;
 
+/* State information for rcu_barrier() and friends. */
+
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
 /*
  * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
@@ -192,7 +201,6 @@ void rcu_note_context_switch(int cpu)
 {
         trace_rcu_utilization("Start context switch");
         rcu_sched_qs(cpu);
-        rcu_preempt_note_context_switch(cpu);
         trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -1311,95 +1319,133 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
- * Move a dying CPU's RCU callbacks to online CPU's callback list.
- * Also record a quiescent state for this CPU for the current grace period.
- * Synchronization and interrupt disabling are not required because
- * this function executes in stop_machine() context. Therefore, cleanup
- * operations that might block must be done later from the CPU_DEAD
- * notifier.
- *
- * Note that the outgoing CPU's bit has already been cleared in the
- * cpu_online_mask. This allows us to randomly pick a callback
- * destination from the bits set in that mask.
+ * Send the specified CPU's RCU callbacks to the orphanage. The
+ * specified CPU must be offline, and the caller must hold the
+ * ->onofflock.
  */
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+static void
+rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
+                          struct rcu_node *rnp, struct rcu_data *rdp)
 {
         int i;
-        unsigned long mask;
-        int receive_cpu = cpumask_any(cpu_online_mask);
-        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
-        struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
-        RCU_TRACE(struct rcu_node *rnp = rdp->mynode); /* For dying CPU. */
 
-        /* First, adjust the counts. */
+        /*
+         * Orphan the callbacks. First adjust the counts. This is safe
+         * because ->onofflock excludes _rcu_barrier()'s adoption of
+         * the callbacks, thus no memory barrier is required.
+         */
         if (rdp->nxtlist != NULL) {
-                receive_rdp->qlen_lazy += rdp->qlen_lazy;
-                receive_rdp->qlen += rdp->qlen;
+                rsp->qlen_lazy += rdp->qlen_lazy;
+                rsp->qlen += rdp->qlen;
+                rdp->n_cbs_orphaned += rdp->qlen;
                 rdp->qlen_lazy = 0;
                 rdp->qlen = 0;
         }
 
         /*
-         * Next, move ready-to-invoke callbacks to be invoked on some
-         * other CPU. These will not be required to pass through another
-         * grace period: They are done, regardless of CPU.
+         * Next, move those callbacks still needing a grace period to
+         * the orphanage, where some other CPU will pick them up.
+         * Some of the callbacks might have gone partway through a grace
+         * period, but that is too bad. They get to start over because we
+         * cannot assume that grace periods are synchronized across CPUs.
+         * We don't bother updating the ->nxttail[] array yet, instead
+         * we just reset the whole thing later on.
          */
-        if (rdp->nxtlist != NULL &&
-            rdp->nxttail[RCU_DONE_TAIL] != &rdp->nxtlist) {
-                struct rcu_head *oldhead;
-                struct rcu_head **oldtail;
-                struct rcu_head **newtail;
-
-                oldhead = rdp->nxtlist;
-                oldtail = receive_rdp->nxttail[RCU_DONE_TAIL];
-                rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
-                *rdp->nxttail[RCU_DONE_TAIL] = *oldtail;
-                *receive_rdp->nxttail[RCU_DONE_TAIL] = oldhead;
-                newtail = rdp->nxttail[RCU_DONE_TAIL];
-                for (i = RCU_DONE_TAIL; i < RCU_NEXT_SIZE; i++) {
-                        if (receive_rdp->nxttail[i] == oldtail)
-                                receive_rdp->nxttail[i] = newtail;
-                        if (rdp->nxttail[i] == newtail)
-                                rdp->nxttail[i] = &rdp->nxtlist;
-                }
+        if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
+                *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
+                rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
+                *rdp->nxttail[RCU_DONE_TAIL] = NULL;
         }
 
         /*
-         * Finally, put the rest of the callbacks at the end of the list.
-         * The ones that made it partway through get to start over: We
-         * cannot assume that grace periods are synchronized across CPUs.
-         * (We could splice RCU_WAIT_TAIL into RCU_NEXT_READY_TAIL, but
-         * this does not seem compelling. Not yet, anyway.)
+         * Then move the ready-to-invoke callbacks to the orphanage,
+         * where some other CPU will pick them up. These will not be
+         * required to pass though another grace period: They are done.
          */
         if (rdp->nxtlist != NULL) {
-                *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
-                receive_rdp->nxttail[RCU_NEXT_TAIL] =
-                                rdp->nxttail[RCU_NEXT_TAIL];
-                receive_rdp->n_cbs_adopted += rdp->qlen;
-                rdp->n_cbs_orphaned += rdp->qlen;
-
-                rdp->nxtlist = NULL;
-                for (i = 0; i < RCU_NEXT_SIZE; i++)
-                        rdp->nxttail[i] = &rdp->nxtlist;
+                *rsp->orphan_donetail = rdp->nxtlist;
+                rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
         }
 
+        /* Finally, initialize the rcu_data structure's list to empty. */
+        rdp->nxtlist = NULL;
+        for (i = 0; i < RCU_NEXT_SIZE; i++)
+                rdp->nxttail[i] = &rdp->nxtlist;
+}
+
+/*
+ * Adopt the RCU callbacks from the specified rcu_state structure's
+ * orphanage. The caller must hold the ->onofflock.
+ */
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+        int i;
+        struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
+
         /*
-         * Record a quiescent state for the dying CPU. This is safe
-         * only because we have already cleared out the callbacks.
-         * (Otherwise, the RCU core might try to schedule the invocation
-         * of callbacks on this now-offline CPU, which would be bad.)
+         * If there is an rcu_barrier() operation in progress, then
+         * only the task doing that operation is permitted to adopt
+         * callbacks. To do otherwise breaks rcu_barrier() and friends
+         * by causing them to fail to wait for the callbacks in the
+         * orphanage.
          */
-        mask = rdp->grpmask;    /* rnp->grplo is constant. */
+        if (rsp->rcu_barrier_in_progress &&
+            rsp->rcu_barrier_in_progress != current)
+                return;
+
+        /* Do the accounting first. */
+        rdp->qlen_lazy += rsp->qlen_lazy;
+        rdp->qlen += rsp->qlen;
+        rdp->n_cbs_adopted += rsp->qlen;
+        rsp->qlen_lazy = 0;
+        rsp->qlen = 0;
+
+        /*
+         * We do not need a memory barrier here because the only way we
+         * can get here if there is an rcu_barrier() in flight is if
+         * we are the task doing the rcu_barrier().
+         */
+
+        /* First adopt the ready-to-invoke callbacks. */
+        if (rsp->orphan_donelist != NULL) {
+                *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
+                *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
+                for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
+                        if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+                                rdp->nxttail[i] = rsp->orphan_donetail;
+                rsp->orphan_donelist = NULL;
+                rsp->orphan_donetail = &rsp->orphan_donelist;
+        }
+
+        /* And then adopt the callbacks that still need a grace period. */
+        if (rsp->orphan_nxtlist != NULL) {
+                *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
+                rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
+                rsp->orphan_nxtlist = NULL;
+                rsp->orphan_nxttail = &rsp->orphan_nxtlist;
+        }
+}
+
+/*
+ * Trace the fact that this CPU is going offline.
+ */
+static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
+{
+        RCU_TRACE(unsigned long mask);
+        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
+        RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
+
+        RCU_TRACE(mask = rdp->grpmask);
         trace_rcu_grace_period(rsp->name,
                                rnp->gpnum + 1 - !!(rnp->qsmask & mask),
                                "cpuofl");
-        rcu_report_qs_rdp(smp_processor_id(), rsp, rdp, rsp->gpnum);
-        /* Note that rcu_report_qs_rdp() might call trace_rcu_grace_period(). */
 }
 
 /*
  * The CPU has been completely removed, and some other CPU is reporting
- * this fact from process context. Do the remainder of the cleanup.
+ * this fact from process context. Do the remainder of the cleanup,
+ * including orphaning the outgoing CPU's RCU callbacks, and also
+ * adopting them, if there is no _rcu_barrier() instance running.
  * There can only be one CPU hotplug operation at a time, so no other
  * CPU can be attempting to update rcu_cpu_kthread_task.
  */
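The orphanage manipulation above rests on one data-structure convention: each callback list is a NULL-terminated singly linked list (->nxtlist, ->orphan_nxtlist, ->orphan_donelist) paired with a tail pointer that points at the final ->next field, or at the head pointer when the list is empty, so an entire list can be spliced onto another in constant time without walking it. Below is a minimal stand-alone sketch of that splice; the struct and helper names are illustrative, not kernel API.

/*
 * Stand-alone sketch of the tail-pointer list splicing used by
 * rcu_send_cbs_to_orphanage()/rcu_adopt_orphan_cbs() above.
 * All names here are illustrative, not kernel API.
 */
#include <stdio.h>

struct cb {
        struct cb *next;
        int id;
};

struct cblist {
        struct cb *head;        /* First callback, or NULL if empty. */
        struct cb **tail;       /* Points at the terminating ->next (or at head). */
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
        c->next = NULL;
        *l->tail = c;           /* Link after the current last element. */
        l->tail = &c->next;     /* The new element's ->next is the new tail. */
}

/* Move every callback from "from" onto the end of "to" in O(1). */
static void cblist_splice(struct cblist *to, struct cblist *from)
{
        if (from->head == NULL)
                return;
        *to->tail = from->head;
        to->tail = from->tail;
        cblist_init(from);      /* "from" is now empty, like the dead CPU's list. */
}

int main(void)
{
        struct cblist orphanage, cpu1;
        struct cb cbs[4];
        struct cb *c;
        int i;

        cblist_init(&orphanage);
        cblist_init(&cpu1);
        for (i = 0; i < 4; i++) {
                cbs[i].id = i;
                cblist_enqueue(&cpu1, &cbs[i]);
        }
        cblist_splice(&orphanage, &cpu1);       /* "Orphan" CPU 1's callbacks. */
        for (c = orphanage.head; c != NULL; c = c->next)
                printf("adopted callback %d\n", c->id);
        return 0;
}

rcu_send_cbs_to_orphanage() is essentially this splice from the dead CPU's lists into the rcu_state orphanage, and rcu_adopt_orphan_cbs() is the same splice in the opposite direction; both run under ->onofflock so the two directions cannot interleave.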
@@ -1409,17 +1455,21 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
         unsigned long mask;
         int need_report = 0;
         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rnp. */
+        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
         /* Adjust any no-longer-needed kthreads. */
         rcu_stop_cpu_kthread(cpu);
         rcu_node_kthread_setaffinity(rnp, -1);
 
-        /* Remove the dying CPU from the bitmasks in the rcu_node hierarchy. */
+        /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
 
         /* Exclude any attempts to start a new grace period. */
         raw_spin_lock_irqsave(&rsp->onofflock, flags);
 
+        /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
+        rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
+        rcu_adopt_orphan_cbs(rsp);
+
         /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
         mask = rdp->grpmask;    /* rnp->grplo is constant. */
         do {
@@ -1456,6 +1506,10 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+}
+
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
@@ -1524,9 +1578,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                             rcu_is_callbacks_kthread());
 
         /* Update count, and requeue any remaining callbacks. */
-        rdp->qlen_lazy -= count_lazy;
-        rdp->qlen -= count;
-        rdp->n_cbs_invoked += count;
         if (list != NULL) {
                 *tail = rdp->nxtlist;
                 rdp->nxtlist = list;
@@ -1536,6 +1587,10 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                 else
                         break;
         }
+        smp_mb(); /* List handling before counting for rcu_barrier(). */
+        rdp->qlen_lazy -= count_lazy;
+        rdp->qlen -= count;
+        rdp->n_cbs_invoked += count;
 
         /* Reinstate batch limit if we have worked down the excess. */
         if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
@@ -1823,11 +1878,14 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         rdp = this_cpu_ptr(rsp->rda);
 
         /* Add the callback to our list. */
-        *rdp->nxttail[RCU_NEXT_TAIL] = head;
-        rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
         rdp->qlen++;
         if (lazy)
                 rdp->qlen_lazy++;
+        else
+                rcu_idle_count_callbacks_posted();
+        smp_mb(); /* Count before adding callback for rcu_barrier(). */
+        *rdp->nxttail[RCU_NEXT_TAIL] = head;
+        rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
         if (__is_kfree_rcu_offset((unsigned long)func))
                 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
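The smp_mb() added here pairs with the smp_mb() added to rcu_do_batch() above: ->qlen is incremented before the new callback becomes reachable, and is decremented only after invoked callbacks have been unlinked and any remainder requeued. A remote task that samples ->qlen and reads zero, as the reworked _rcu_barrier() below does, can therefore conclude that the CPU has no callbacks it must wait for. The following stand-alone sketch shows that publish/retire ordering, using C11 atomics in place of the kernel primitives; all names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int pending_cbs;   /* Plays the role of rdp->qlen. */

/* Enqueue side, mirroring __call_rcu(): count, full barrier, then link. */
static void post_callback(void (*enqueue)(void))
{
        atomic_fetch_add_explicit(&pending_cbs, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        enqueue();                      /* Make the callback reachable. */
}

/* Invoke side, mirroring rcu_do_batch(): unlink/invoke, barrier, then count. */
static void invoke_callbacks(int (*drain)(void))
{
        int n = drain();                /* Unlink and invoke callbacks. */
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        atomic_fetch_sub_explicit(&pending_cbs, n, memory_order_relaxed);
}

/* Remote check, mirroring _rcu_barrier(): zero means nothing to wait for. */
static bool cpu_has_callbacks(void)
{
        return atomic_load_explicit(&pending_cbs, memory_order_relaxed) != 0;
}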
@@ -1893,6 +1951,38 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
+/*
+ * Because a context switch is a grace period for RCU-sched and RCU-bh,
+ * any blocking grace-period wait automatically implies a grace period
+ * if there is only one CPU online at any point time during execution
+ * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
+ * occasionally incorrectly indicate that there are multiple CPUs online
+ * when there was in fact only one the whole time, as this just adds
+ * some overhead: RCU still operates correctly.
+ *
+ * Of course, sampling num_online_cpus() with preemption enabled can
+ * give erroneous results if there are concurrent CPU-hotplug operations.
+ * For example, given a demonic sequence of preemptions in num_online_cpus()
+ * and CPU-hotplug operations, there could be two or more CPUs online at
+ * all times, but num_online_cpus() might well return one (or even zero).
+ *
+ * However, all such demonic sequences require at least one CPU-offline
+ * operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer
+ * is only a problem if there is an RCU read-side critical section executing
+ * throughout. But RCU-sched and RCU-bh read-side critical sections
+ * disable either preemption or bh, which prevents a CPU from going offline.
+ * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
+ * that there is only one CPU when in fact there was more than one throughout
+ * is when there were no RCU readers in the system. If there are no
+ * RCU readers, the grace period by definition can be of zero length,
+ * regardless of the number of online CPUs.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+        might_sleep(); /* Check for RCU read-side critical section. */
+        return num_online_cpus() <= 1;
+}
+
 /**
  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
  *
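rcu_blocking_is_gp() gives the blocking grace-period primitives a single-CPU fast path. Its callers are not part of this hunk, so the fragment below is only a sketch of how such a caller would typically use it; wait_rcu_gp() and call_rcu_sched() are existing kernel primitives, but this body is illustrative rather than the exact code.

/* Illustrative caller; the real synchronize_sched() body is not shown here. */
void synchronize_sched_sketch(void)
{
        if (rcu_blocking_is_gp())
                return;                 /* One CPU online: context switch already implied a GP. */
        wait_rcu_gp(call_rcu_sched);    /* Otherwise block until a full grace period elapses. */
}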
@@ -2166,11 +2256,10 @@ static int rcu_cpu_has_callbacks(int cpu)
                rcu_preempt_cpu_has_callbacks(cpu);
 }
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
-
+/*
+ * RCU callback function for _rcu_barrier(). If we are last, wake
+ * up the task executing _rcu_barrier().
+ */
 static void rcu_barrier_callback(struct rcu_head *notused)
 {
         if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -2200,27 +2289,94 @@ static void _rcu_barrier(struct rcu_state *rsp,
                          void (*call_rcu_func)(struct rcu_head *head,
                                                void (*func)(struct rcu_head *head)))
 {
-        BUG_ON(in_interrupt());
+        int cpu;
+        unsigned long flags;
+        struct rcu_data *rdp;
+        struct rcu_head rh;
+
+        init_rcu_head_on_stack(&rh);
+
         /* Take mutex to serialize concurrent rcu_barrier() requests. */
         mutex_lock(&rcu_barrier_mutex);
-        init_completion(&rcu_barrier_completion);
+
+        smp_mb(); /* Prevent any prior operations from leaking in. */
+
         /*
-         * Initialize rcu_barrier_cpu_count to 1, then invoke
-         * rcu_barrier_func() on each CPU, so that each CPU also has
-         * incremented rcu_barrier_cpu_count. Only then is it safe to
-         * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-         * might complete its grace period before all of the other CPUs
-         * did their increment, causing this function to return too
-         * early. Note that on_each_cpu() disables irqs, which prevents
-         * any CPUs from coming online or going offline until each online
-         * CPU has queued its RCU-barrier callback.
+         * Initialize the count to one rather than to zero in order to
+         * avoid a too-soon return to zero in case of a short grace period
+         * (or preemption of this task). Also flag this task as doing
+         * an rcu_barrier(). This will prevent anyone else from adopting
+         * orphaned callbacks, which could cause otherwise failure if a
+         * CPU went offline and quickly came back online. To see this,
+         * consider the following sequence of events:
+         *
+         * 1.   We cause CPU 0 to post an rcu_barrier_callback() callback.
+         * 2.   CPU 1 goes offline, orphaning its callbacks.
+         * 3.   CPU 0 adopts CPU 1's orphaned callbacks.
+         * 4.   CPU 1 comes back online.
+         * 5.   We cause CPU 1 to post an rcu_barrier_callback() callback.
+         * 6.   Both rcu_barrier_callback() callbacks are invoked, awakening
+         *      us -- but before CPU 1's orphaned callbacks are invoked!!!
          */
+        init_completion(&rcu_barrier_completion);
         atomic_set(&rcu_barrier_cpu_count, 1);
-        on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+        raw_spin_lock_irqsave(&rsp->onofflock, flags);
+        rsp->rcu_barrier_in_progress = current;
+        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+
+        /*
+         * Force every CPU with callbacks to register a new callback
+         * that will tell us when all the preceding callbacks have
+         * been invoked. If an offline CPU has callbacks, wait for
+         * it to either come back online or to finish orphaning those
+         * callbacks.
+         */
+        for_each_possible_cpu(cpu) {
+                preempt_disable();
+                rdp = per_cpu_ptr(rsp->rda, cpu);
+                if (cpu_is_offline(cpu)) {
+                        preempt_enable();
+                        while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
+                                schedule_timeout_interruptible(1);
+                } else if (ACCESS_ONCE(rdp->qlen)) {
+                        smp_call_function_single(cpu, rcu_barrier_func,
+                                                 (void *)call_rcu_func, 1);
+                        preempt_enable();
+                } else {
+                        preempt_enable();
+                }
+        }
+
+        /*
+         * Now that all online CPUs have rcu_barrier_callback() callbacks
+         * posted, we can adopt all of the orphaned callbacks and place
+         * an rcu_barrier_callback() callback after them. When that is done,
+         * we are guaranteed to have an rcu_barrier_callback() callback
+         * following every callback that could possibly have been
+         * registered before _rcu_barrier() was called.
+         */
+        raw_spin_lock_irqsave(&rsp->onofflock, flags);
+        rcu_adopt_orphan_cbs(rsp);
+        rsp->rcu_barrier_in_progress = NULL;
+        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
+        atomic_inc(&rcu_barrier_cpu_count);
+        smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
+        call_rcu_func(&rh, rcu_barrier_callback);
+
+        /*
+         * Now that we have an rcu_barrier_callback() callback on each
+         * CPU, and thus each counted, remove the initial count.
+         */
         if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                 complete(&rcu_barrier_completion);
+
+        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
         wait_for_completion(&rcu_barrier_completion);
+
+        /* Other rcu_barrier() invocations can now safely proceed. */
         mutex_unlock(&rcu_barrier_mutex);
+
+        destroy_rcu_head_on_stack(&rh);
 }
 
 /**
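The core of the reworked _rcu_barrier() is a reference count that starts at one: each posted rcu_barrier_callback() adds a reference and drops it when invoked, and the initial self-reference is dropped only after every callback has been posted, so the completion cannot fire while posting is still in progress. Here is a stand-alone user-space sketch of that pattern, with C11 atomics and a pthread condition variable standing in for the kernel's atomic_t and struct completion; all names are illustrative, not kernel API.

#include <pthread.h>
#include <stdatomic.h>

static _Atomic int barrier_count;
static pthread_mutex_t barrier_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t barrier_done = PTHREAD_COND_INITIALIZER;
static int barrier_completed;

static void barrier_complete(void)              /* complete() */
{
        pthread_mutex_lock(&barrier_lock);
        barrier_completed = 1;
        pthread_cond_broadcast(&barrier_done);
        pthread_mutex_unlock(&barrier_lock);
}

static void barrier_callback(void)              /* rcu_barrier_callback() */
{
        if (atomic_fetch_sub(&barrier_count, 1) == 1)
                barrier_complete();             /* We were the last reference. */
}

/* Called once per CPU that has callbacks queued (rcu_barrier_func()). */
static void post_barrier_callback(void)
{
        atomic_fetch_add(&barrier_count, 1);
        /* ...queue barrier_callback() behind that CPU's existing callbacks... */
}

static void barrier_wait(void)                  /* _rcu_barrier() core */
{
        barrier_completed = 0;
        atomic_store(&barrier_count, 1);        /* Initial self-reference. */
        /* ...post_barrier_callback() for each CPU with callbacks... */
        if (atomic_fetch_sub(&barrier_count, 1) == 1)
                barrier_complete();             /* Drop the initial reference. */
        pthread_mutex_lock(&barrier_lock);
        while (!barrier_completed)
                pthread_cond_wait(&barrier_done, &barrier_lock);
        pthread_mutex_unlock(&barrier_lock);
}

The same reasoning explains why the kernel code sets ->rcu_barrier_in_progress under ->onofflock before posting: orphaned callbacks may then be adopted only by the barrier task itself, so no callback can slip out from under the count while a CPU goes offline and back online.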
@@ -2417,7 +2573,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 
         for (i = NUM_RCU_LVLS - 1; i > 0; i--)
                 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-        rsp->levelspread[0] = RCU_FANOUT_LEAF;
+        rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF;
 }
 #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
