author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-01-23 20:05:46 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-02-21 12:06:04 -0500
commit		3d3b7db0a22085cfc05c3318b9874f7fb8266d18 (patch)
tree		7f6a080ca64f8a07dd058c2771b983c602fd1a65 /kernel/rcutree_plugin.h
parent		c0d6d01bffdce19fa19baad6cb8cc3eed7bfd6f5 (diff)
rcu: Move synchronize_sched_expedited() to rcutree.c
Now that TREE_RCU and TREE_PREEMPT_RCU no longer do anything different
for the single-CPU case, there is no need for multiple definitions of
synchronize_sched_expedited(). It is no longer in any sense a plug-in,
so move it from kernel/rcutree_plugin.h to kernel/rcutree.c.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
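
The function removed in the diff below coordinates concurrent expedited requests with a pair of counters, sync_sched_expedited_started and sync_sched_expedited_done, used like the two halves of a ticket lock. As a rough illustration only, here is a minimal user-space sketch of that started/done protocol using C11 atomics; try_force_quiescent() and slow_path_wait() are hypothetical stand-ins for try_stop_cpus() and synchronize_sched(), and the CPU-hotplug locking, udelay() backoff, and explicit smp_mb() barriers of the real kernel code are omitted.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sync_started;	/* plays the role of sync_sched_expedited_started */
static atomic_int sync_done;	/* plays the role of sync_sched_expedited_done */

/* Hypothetical stand-in for try_stop_cpus(); returns true when every CPU
 * has been forced through a context switch. */
static bool try_force_quiescent(void) { return true; }

/* Hypothetical stand-in for the synchronize_sched() slow path. */
static void slow_path_wait(void) { }

void expedited_wait_sketch(void)
{
	int firstsnap, snap, s, trycount = 0;

	/* Take a ticket; the incremented value is our entry snapshot. */
	firstsnap = snap = atomic_fetch_add(&sync_started, 1) + 1;

	while (!try_force_quiescent()) {
		/* Too many failures in a row: fall back to the slow path. */
		if (++trycount > 10) {
			slow_path_wait();
			return;
		}

		/* Someone else's grace period may already cover us. */
		s = atomic_load(&sync_done);
		if (s - firstsnap >= 0)
			return;

		/* Refresh the snapshot so later arrivals can piggyback. */
		snap = atomic_load(&sync_started);
	}

	/* Success: advance "done" to our snapshot, unless a later caller
	 * has already pushed it past that point. */
	do {
		s = atomic_load(&sync_done);
		if (s - snap >= 0)
			break;
	} while (!atomic_compare_exchange_weak(&sync_done, &s, snap));
}
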
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	116
1 file changed, 0 insertions, 116 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index cecea84f4f3f..98ce17cf1fb5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,7 +25,6 @@
  */
 
 #include <linux/delay.h>
-#include <linux/stop_machine.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1888,121 +1887,6 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-	/*
-	 * There must be a full memory barrier on each affected CPU
-	 * between the time that try_stop_cpus() is called and the
-	 * time that it returns.
-	 *
-	 * In the current initial implementation of cpu_stop, the
-	 * above condition is already met when the control reaches
-	 * this point and the following smp_mb() is not strictly
-	 * necessary. Do smp_mb() anyway for documentation and
-	 * robustness against future implementation changes.
-	 */
-	smp_mb(); /* See above comment block. */
-	return 0;
-}
-
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
- *
- * This implementation can be thought of as an application of ticket
- * locking to RCU, with sync_sched_expedited_started and
- * sync_sched_expedited_done taking on the roles of the halves
- * of the ticket-lock word. Each task atomically increments
- * sync_sched_expedited_started upon entry, snapshotting the old value,
- * then attempts to stop all the CPUs. If this succeeds, then each
- * CPU will have executed a context switch, resulting in an RCU-sched
- * grace period. We are then done, so we use atomic_cmpxchg() to
- * update sync_sched_expedited_done to match our snapshot -- but
- * only if someone else has not already advanced past our snapshot.
- *
- * On the other hand, if try_stop_cpus() fails, we check the value
- * of sync_sched_expedited_done. If it has advanced past our
- * initial snapshot, then someone else must have forced a grace period
- * some time after we took our snapshot. In this case, our work is
- * done for us, and we can simply return. Otherwise, we try again,
- * but keep our initial snapshot for purposes of checking for someone
- * doing our work for us.
- *
- * If we fail too many times in a row, we fall back to synchronize_sched().
- */
-void synchronize_sched_expedited(void)
-{
-	int firstsnap, s, snap, trycount = 0;
-
-	/* Note that atomic_inc_return() implies full memory barrier. */
-	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
-	get_online_cpus();
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-
-	/*
-	 * Each pass through the following loop attempts to force a
-	 * context switch on each CPU.
-	 */
-	while (try_stop_cpus(cpu_online_mask,
-			     synchronize_sched_expedited_cpu_stop,
-			     NULL) == -EAGAIN) {
-		put_online_cpus();
-
-		/* No joy, try again later. Or just synchronize_sched(). */
-		if (trycount++ < 10)
-			udelay(trycount * num_online_cpus());
-		else {
-			synchronize_sched();
-			return;
-		}
-
-		/* Check to see if someone else did our work for us. */
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			return;
-		}
-
-		/*
-		 * Refetching sync_sched_expedited_started allows later
-		 * callers to piggyback on our grace period. We subtract
-		 * 1 to get the same token that the last incrementer got.
-		 * We retry after they started, so our grace period works
-		 * for them, and they started after our first try, so their
-		 * grace period works for us.
-		 */
-		get_online_cpus();
-		snap = atomic_read(&sync_sched_expedited_started);
-		smp_mb(); /* ensure read is before try_stop_cpus(). */
-	}
-
-	/*
-	 * Everyone up to our most recent fetch is covered by our grace
-	 * period. Update the counter, but only if our work is still
-	 * relevant -- which it won't be if someone who started later
-	 * than we did beat us to the punch.
-	 */
-	do {
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			break;
-		}
-	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
-
-	put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
 #if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*