Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  80
1 files changed, 80 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e3d3bbddbcd5..4ca7e0292fd8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,6 +46,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/time.h>
+#include <linux/kernel_stat.h>
 
 #include "rcutree.h"
 
@@ -79,6 +80,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+static int rcu_scheduler_active __read_mostly;
+
 
 /*
  * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
@@ -1396,6 +1399,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed. These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns. However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API. In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
+/**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu_bh grace
+ * period has elapsed, in other words after all currently executing rcu_bh
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ * and may be nested.
+ */
+void synchronize_rcu_bh(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_bh(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+
 /*
  * Check to see if there is any immediate RCU-related work to be done
  * by the current CPU, for the specified type of RCU, returning 1 if so.
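
The kerneldoc blocks above describe the usage contract for the two newly exported primitives, but no call site appears in this diff. As a rough sketch (not part of this commit; struct foo, gptr, reader() and updater() are hypothetical names), an updater protecting a pointer against preempt-disabled readers would pair those readers with synchronize_sched(); readers bracketed by rcu_read_lock_bh()/rcu_read_unlock_bh() would pair with synchronize_rcu_bh() in exactly the same way:

#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct foo *gptr;		/* hypothetical rcu-sched-protected pointer */

static int reader(void)
{
	int val;

	preempt_disable();		/* stands in for rcu_read_lock_sched() */
	val = ACCESS_ONCE(gptr)->a;
	preempt_enable();		/* stands in for rcu_read_unlock_sched() */
	return val;
}

static void updater(struct foo *newp)
{
	struct foo *oldp = gptr;

	rcu_assign_pointer(gptr, newp);
	synchronize_sched();		/* wait for all preempt-disabled readers */
	kfree(oldp);			/* no reader can still reference oldp */
}
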
@@ -1480,6 +1545,21 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+/*
+ * This function is invoked towards the end of the scheduler's initialization
+ * process.  Before this is called, the idle task might contain
+ * RCU read-side critical sections (during which time, this idle
+ * task is booting the system).  After this function is called, the
+ * idle tasks are prohibited from containing RCU read-side critical
+ * sections.
+ */
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
+
 static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
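
rcu_scheduler_starting() itself only records that the scheduler is up; the code that invokes it, and any consumer of rcu_scheduler_active, lie outside this diff. Going by the comment above, the expected call site is late in scheduler initialization, while only the boot CPU is online and before the first context switch. A hypothetical sketch follows; the exact location (e.g. the end of sched_init()) is an assumption, not something shown in this patch:

void __init sched_init(void)
{
	/* ... runqueue and scheduler-class setup ... */

	/*
	 * Only the boot CPU is online and no context switch has happened
	 * yet, so the WARN_ON()s in rcu_scheduler_starting() stay quiet.
	 * After this call, rcu_scheduler_active is 1 and idle tasks may
	 * no longer contain RCU read-side critical sections.
	 */
	rcu_scheduler_starting();
}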