author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-01-23 20:05:46 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-02-21 12:06:04 -0500
commit     3d3b7db0a22085cfc05c3318b9874f7fb8266d18
tree       7f6a080ca64f8a07dd058c2771b983c602fd1a65  /kernel/rcutree.c
parent     c0d6d01bffdce19fa19baad6cb8cc3eed7bfd6f5
rcu: Move synchronize_sched_expedited() to rcutree.c
Now that TREE_RCU and TREE_PREEMPT_RCU no longer do anything different
for the single-CPU case, there is no need for multiple definitions of
synchronize_sched_expedited(). It is no longer in any sense a plug-in,
so move it from kernel/rcutree_plugin.h to kernel/rcutree.c.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--   kernel/rcutree.c | 117
1 file changed, 117 insertions(+), 0 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index bcf7db2f2fd2..05470d4caba3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -50,6 +50,8 @@
 #include <linux/wait.h>
 #include <linux/kthread.h>
 #include <linux/prefetch.h>
+#include <linux/delay.h>
+#include <linux/stop_machine.h>
 
 #include "rcutree.h"
 #include <trace/events/rcu.h>
@@ -1918,6 +1920,121 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
+static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+        /*
+         * There must be a full memory barrier on each affected CPU
+         * between the time that try_stop_cpus() is called and the
+         * time that it returns.
+         *
+         * In the current initial implementation of cpu_stop, the
+         * above condition is already met when the control reaches
+         * this point and the following smp_mb() is not strictly
+         * necessary.  Do smp_mb() anyway for documentation and
+         * robustness against future implementation changes.
+         */
+        smp_mb(); /* See above comment block. */
+        return 0;
+}
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ *
+ * This implementation can be thought of as an application of ticket
+ * locking to RCU, with sync_sched_expedited_started and
+ * sync_sched_expedited_done taking on the roles of the halves
+ * of the ticket-lock word.  Each task atomically increments
+ * sync_sched_expedited_started upon entry, snapshotting the old value,
+ * then attempts to stop all the CPUs.  If this succeeds, then each
+ * CPU will have executed a context switch, resulting in an RCU-sched
+ * grace period.  We are then done, so we use atomic_cmpxchg() to
+ * update sync_sched_expedited_done to match our snapshot -- but
+ * only if someone else has not already advanced past our snapshot.
+ *
+ * On the other hand, if try_stop_cpus() fails, we check the value
+ * of sync_sched_expedited_done.  If it has advanced past our
+ * initial snapshot, then someone else must have forced a grace period
+ * some time after we took our snapshot.  In this case, our work is
+ * done for us, and we can simply return.  Otherwise, we try again,
+ * but keep our initial snapshot for purposes of checking for someone
+ * doing our work for us.
+ *
+ * If we fail too many times in a row, we fall back to synchronize_sched().
+ */
+void synchronize_sched_expedited(void)
+{
+        int firstsnap, s, snap, trycount = 0;
+
+        /* Note that atomic_inc_return() implies full memory barrier. */
+        firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+        get_online_cpus();
+        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
+        /*
+         * Each pass through the following loop attempts to force a
+         * context switch on each CPU.
+         */
+        while (try_stop_cpus(cpu_online_mask,
+                             synchronize_sched_expedited_cpu_stop,
+                             NULL) == -EAGAIN) {
+                put_online_cpus();
+
+                /* No joy, try again later.  Or just synchronize_sched(). */
+                if (trycount++ < 10)
+                        udelay(trycount * num_online_cpus());
+                else {
+                        synchronize_sched();
+                        return;
+                }
+
+                /* Check to see if someone else did our work for us. */
+                s = atomic_read(&sync_sched_expedited_done);
+                if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+                        smp_mb(); /* ensure test happens before caller kfree */
+                        return;
+                }
+
+                /*
+                 * Refetching sync_sched_expedited_started allows later
+                 * callers to piggyback on our grace period.  We subtract
+                 * 1 to get the same token that the last incrementer got.
+                 * We retry after they started, so our grace period works
+                 * for them, and they started after our first try, so their
+                 * grace period works for us.
+                 */
+                get_online_cpus();
+                snap = atomic_read(&sync_sched_expedited_started);
+                smp_mb(); /* ensure read is before try_stop_cpus(). */
+        }
+
+        /*
+         * Everyone up to our most recent fetch is covered by our grace
+         * period.  Update the counter, but only if our work is still
+         * relevant -- which it won't be if someone who started later
+         * than we did beat us to the punch.
+         */
+        do {
+                s = atomic_read(&sync_sched_expedited_done);
+                if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+                        smp_mb(); /* ensure test happens before caller kfree */
+                        break;
+                }
+        } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+
+        put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
 /*
  * Check to see if there is any immediate RCU-related work to be done
  * by the current CPU, for the specified type of RCU, returning 1 if so.
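The block comment in the moved code describes the expedited grace period as a ticket-lock-style protocol built from the started/done counter pair. The user-space sketch below is illustrative only: the names expedited_started, expedited_done, and try_expensive_flush() are hypothetical, C11 seq_cst atomics stand in for the explicit smp_mb() calls, and the refinements of the real function (refetching the started counter to piggyback later callers, CPU-hotplug exclusion, the synchronize_sched() fallback) are omitted. It shows only the core pattern: take a ticket by incrementing the started counter, return early if someone else's completed pass already covers the snapshot, then advance the done counter with a compare-and-swap loop that never moves it backwards.

/*
 * expedited_sketch.c: standalone illustration of the started/done ticket
 * pattern used by synchronize_sched_expedited().  All names here are
 * hypothetical; this is not kernel code.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint expedited_started;   /* role of sync_sched_expedited_started */
static atomic_uint expedited_done;      /* role of sync_sched_expedited_done */

/* Wraparound-safe "a is at or past b", like the kernel's UINT_CMP_GE(). */
static int counter_geq(unsigned int a, unsigned int b)
{
        return UINT_MAX / 2 >= a - b;
}

/* Stand-in for the expensive try_stop_cpus() step; always succeeds here. */
static int try_expensive_flush(void)
{
        return 1;
}

static void expedited_wait(void)
{
        /* Take a ticket; the post-increment counter value is our snapshot. */
        unsigned int snap = atomic_fetch_add(&expedited_started, 1) + 1;

        /*
         * Retry the expensive step on failure, but return early if some
         * other caller's completed pass has already covered our snapshot.
         */
        while (!try_expensive_flush()) {
                if (counter_geq(atomic_load(&expedited_done), snap))
                        return;
        }

        /*
         * Publish our pass: advance "done" to our snapshot, but never move
         * it backwards if a later caller already published a larger value.
         */
        unsigned int s = atomic_load(&expedited_done);
        while (!counter_geq(s, snap) &&
               !atomic_compare_exchange_weak(&expedited_done, &s, snap))
                ;       /* a failed compare-exchange reloads s */
}

int main(void)
{
        expedited_wait();
        printf("started=%u done=%u\n",
               atomic_load(&expedited_started),
               atomic_load(&expedited_done));
        return 0;
}

Under this scheme, any number of callers that enter while one expensive pass is in flight can be satisfied by that single pass, which is the property the kernel comment describes as later callers piggybacking on an earlier caller's grace period.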