author    Paul E. McKenney <paul.mckenney@linaro.org>    2012-01-23 20:05:46 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-02-21 12:06:04 -0500
commit    3d3b7db0a22085cfc05c3318b9874f7fb8266d18 (patch)
tree      7f6a080ca64f8a07dd058c2771b983c602fd1a65
parent    c0d6d01bffdce19fa19baad6cb8cc3eed7bfd6f5 (diff)
rcu: Move synchronize_sched_expedited() to rcutree.c
Now that TREE_RCU and TREE_PREEMPT_RCU no longer do anything different
for the single-CPU case, there is no need for multiple definitions of
synchronize_sched_expedited(). It is no longer in any sense a plug-in,
so move it from kernel/rcutree_plugin.h to kernel/rcutree.c.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--  kernel/rcutree.c        | 117
-rw-r--r--  kernel/rcutree_plugin.h | 116
2 files changed, 117 insertions(+), 116 deletions(-)
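
The long comment block added to kernel/rcutree.c below describes synchronize_sched_expedited() as a ticket-lock-like scheme built from two counters, sync_sched_expedited_started and sync_sched_expedited_done. As a rough orientation before reading the full diff, the following stand-alone C sketch models just that two-counter idea; it is not part of the patch, the helper try_to_force_grace_period() is a hypothetical stand-in for the try_stop_cpus() step, and the sketch collapses the real code's firstsnap/snap pair into a single snapshot.

/* Stand-alone model of the started/done ticket counters (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint started;	/* ticket handed out to each caller on entry */
static atomic_uint done;	/* highest ticket known to be covered by a grace period */

/* Hypothetical stand-in for try_stop_cpus(); always "succeeds" in this model. */
static bool try_to_force_grace_period(void)
{
	return true;
}

void expedited_wait_sketch(void)
{
	unsigned int snap = atomic_fetch_add(&started, 1) + 1;	/* take a ticket */
	unsigned int s;

	while (!try_to_force_grace_period()) {
		/* If a caller who started later already drove a grace period,
		 * it covers us as well, so there is nothing left to do. */
		s = atomic_load(&done);
		if ((int)(s - snap) >= 0)	/* wraparound-safe, like UINT_CMP_GE() */
			return;
	}

	/* We drove a grace period: advance "done" to our ticket, unless a
	 * later caller has already advanced it past us. */
	s = atomic_load(&done);
	while ((int)(s - snap) < 0 &&
	       !atomic_compare_exchange_weak(&done, &s, snap))
		;	/* on failure, s is reloaded with the current value of done */
}
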
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index bcf7db2f2fd2..05470d4caba3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -50,6 +50,8 @@
 #include <linux/wait.h>
 #include <linux/kthread.h>
 #include <linux/prefetch.h>
+#include <linux/delay.h>
+#include <linux/stop_machine.h>
 
 #include "rcutree.h"
 #include <trace/events/rcu.h>
@@ -1918,6 +1920,121 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
+static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+	/*
+	 * There must be a full memory barrier on each affected CPU
+	 * between the time that try_stop_cpus() is called and the
+	 * time that it returns.
+	 *
+	 * In the current initial implementation of cpu_stop, the
+	 * above condition is already met when the control reaches
+	 * this point and the following smp_mb() is not strictly
+	 * necessary. Do smp_mb() anyway for documentation and
+	 * robustness against future implementation changes.
+	 */
+	smp_mb(); /* See above comment block. */
+	return 0;
+}
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly. This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier. Failing to
+ * observe this restriction will result in deadlock.
+ *
+ * This implementation can be thought of as an application of ticket
+ * locking to RCU, with sync_sched_expedited_started and
+ * sync_sched_expedited_done taking on the roles of the halves
+ * of the ticket-lock word. Each task atomically increments
+ * sync_sched_expedited_started upon entry, snapshotting the old value,
+ * then attempts to stop all the CPUs. If this succeeds, then each
+ * CPU will have executed a context switch, resulting in an RCU-sched
+ * grace period. We are then done, so we use atomic_cmpxchg() to
+ * update sync_sched_expedited_done to match our snapshot -- but
+ * only if someone else has not already advanced past our snapshot.
+ *
+ * On the other hand, if try_stop_cpus() fails, we check the value
+ * of sync_sched_expedited_done. If it has advanced past our
+ * initial snapshot, then someone else must have forced a grace period
+ * some time after we took our snapshot. In this case, our work is
+ * done for us, and we can simply return. Otherwise, we try again,
+ * but keep our initial snapshot for purposes of checking for someone
+ * doing our work for us.
+ *
+ * If we fail too many times in a row, we fall back to synchronize_sched().
+ */
+void synchronize_sched_expedited(void)
+{
+	int firstsnap, s, snap, trycount = 0;
+
+	/* Note that atomic_inc_return() implies full memory barrier. */
+	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+	get_online_cpus();
+	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
+	/*
+	 * Each pass through the following loop attempts to force a
+	 * context switch on each CPU.
+	 */
+	while (try_stop_cpus(cpu_online_mask,
+			     synchronize_sched_expedited_cpu_stop,
+			     NULL) == -EAGAIN) {
+		put_online_cpus();
+
+		/* No joy, try again later. Or just synchronize_sched(). */
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+
+		/* Check to see if someone else did our work for us. */
+		s = atomic_read(&sync_sched_expedited_done);
+		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+
+		/*
+		 * Refetching sync_sched_expedited_started allows later
+		 * callers to piggyback on our grace period. We subtract
+		 * 1 to get the same token that the last incrementer got.
+		 * We retry after they started, so our grace period works
+		 * for them, and they started after our first try, so their
+		 * grace period works for us.
+		 */
+		get_online_cpus();
+		snap = atomic_read(&sync_sched_expedited_started);
+		smp_mb(); /* ensure read is before try_stop_cpus(). */
+	}
+
+	/*
+	 * Everyone up to our most recent fetch is covered by our grace
+	 * period. Update the counter, but only if our work is still
+	 * relevant -- which it won't be if someone who started later
+	 * than we did beat us to the punch.
+	 */
+	do {
+		s = atomic_read(&sync_sched_expedited_done);
+		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			break;
+		}
+	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
 /*
  * Check to see if there is any immediate RCU-related work to be done
  * by the current CPU, for the specified type of RCU, returning 1 if so.
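
The two "ensure test happens before caller kfree" barriers in the function above exist because the usual caller pattern is to unpublish a data structure, wait for an RCU-sched grace period, and only then free the memory. A minimal kernel-style sketch of such a caller follows (illustrative only; struct foo and remove_foo() are hypothetical and not part of this patch).

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	int data;
};

/*
 * Unlink the element so new readers cannot find it, wait until every
 * pre-existing RCU-sched reader (preemption-disabled region) has finished,
 * then free the memory.
 */
static void remove_foo(struct foo *fp)
{
	list_del_rcu(&fp->list);	/* unpublish the element */
	synchronize_sched_expedited();	/* expedited RCU-sched grace period */
	kfree(fp);			/* no reader can still reference fp */
}
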
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index cecea84f4f3f..98ce17cf1fb5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,7 +25,6 @@
  */
 
 #include <linux/delay.h>
-#include <linux/stop_machine.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1888,121 +1887,6 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-	/*
-	 * There must be a full memory barrier on each affected CPU
-	 * between the time that try_stop_cpus() is called and the
-	 * time that it returns.
-	 *
-	 * In the current initial implementation of cpu_stop, the
-	 * above condition is already met when the control reaches
-	 * this point and the following smp_mb() is not strictly
-	 * necessary. Do smp_mb() anyway for documentation and
-	 * robustness against future implementation changes.
-	 */
-	smp_mb(); /* See above comment block. */
-	return 0;
-}
-
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly. This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier. Failing to
- * observe this restriction will result in deadlock.
- *
- * This implementation can be thought of as an application of ticket
- * locking to RCU, with sync_sched_expedited_started and
- * sync_sched_expedited_done taking on the roles of the halves
- * of the ticket-lock word. Each task atomically increments
- * sync_sched_expedited_started upon entry, snapshotting the old value,
- * then attempts to stop all the CPUs. If this succeeds, then each
- * CPU will have executed a context switch, resulting in an RCU-sched
- * grace period. We are then done, so we use atomic_cmpxchg() to
- * update sync_sched_expedited_done to match our snapshot -- but
- * only if someone else has not already advanced past our snapshot.
- *
- * On the other hand, if try_stop_cpus() fails, we check the value
- * of sync_sched_expedited_done. If it has advanced past our
- * initial snapshot, then someone else must have forced a grace period
- * some time after we took our snapshot. In this case, our work is
- * done for us, and we can simply return. Otherwise, we try again,
- * but keep our initial snapshot for purposes of checking for someone
- * doing our work for us.
- *
- * If we fail too many times in a row, we fall back to synchronize_sched().
- */
-void synchronize_sched_expedited(void)
-{
-	int firstsnap, s, snap, trycount = 0;
-
-	/* Note that atomic_inc_return() implies full memory barrier. */
-	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
-	get_online_cpus();
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-
-	/*
-	 * Each pass through the following loop attempts to force a
-	 * context switch on each CPU.
-	 */
-	while (try_stop_cpus(cpu_online_mask,
-			     synchronize_sched_expedited_cpu_stop,
-			     NULL) == -EAGAIN) {
-		put_online_cpus();
-
-		/* No joy, try again later. Or just synchronize_sched(). */
-		if (trycount++ < 10)
-			udelay(trycount * num_online_cpus());
-		else {
-			synchronize_sched();
-			return;
-		}
-
-		/* Check to see if someone else did our work for us. */
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			return;
-		}
-
-		/*
-		 * Refetching sync_sched_expedited_started allows later
-		 * callers to piggyback on our grace period. We subtract
-		 * 1 to get the same token that the last incrementer got.
-		 * We retry after they started, so our grace period works
-		 * for them, and they started after our first try, so their
-		 * grace period works for us.
-		 */
-		get_online_cpus();
-		snap = atomic_read(&sync_sched_expedited_started);
-		smp_mb(); /* ensure read is before try_stop_cpus(). */
-	}
-
-	/*
-	 * Everyone up to our most recent fetch is covered by our grace
-	 * period. Update the counter, but only if our work is still
-	 * relevant -- which it won't be if someone who started later
-	 * than we did beat us to the punch.
-	 */
-	do {
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			break;
-		}
-	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
-
-	put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
 #if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*