author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-05-30 06:21:48 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-07-02 15:34:24 -0400
commit    29154c57e35a191c83b19c61b1935c9f21957662
tree      e761fdd59279de1e8c45bb42b40361c8bc4ff04d /kernel/rcutree.c
parent    a16b7a693430406dc229ab0c6b154f669a2031c5
rcu: Split RCU core processing out of __call_rcu()
The __call_rcu() function is a bit overweight, so this commit splits
it into actual enqueuing of and accounting for the callback
(__call_rcu()) and associated RCU-core processing (__call_rcu_core()).

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
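To make the shape of the refactoring concrete, here is a minimal userspace
sketch (not kernel code) of the same pattern: the enqueue path does only the
append and the accounting, and a separate helper owns the "does the core
machinery need a kick?" policy. Every name in it (cb_queue, enqueue_cb,
maybe_kick_core, QLEN_HIGH_MARK) is a hypothetical illustration, not a kernel
identifier, and the simple threshold check merely stands in for the
grace-period forcing logic of the real __call_rcu_core().

    /* split_sketch.c: illustrative only; all identifiers are hypothetical. */
    #include <stdio.h>
    #include <stddef.h>

    struct cb {
            struct cb *next;
            void (*func)(struct cb *);
    };

    struct cb_queue {
            struct cb *head;
            struct cb **tail;       /* tail pointer for O(1) append */
            unsigned long qlen;     /* accounting, as rdp->qlen is in the real code */
    };

    #define QLEN_HIGH_MARK 16       /* arbitrary threshold for the sketch */

    /* Rough analogue of __call_rcu_core(): policy lives here, off the hot path. */
    static void maybe_kick_core(struct cb_queue *q)
    {
            if (q->qlen > QLEN_HIGH_MARK)
                    printf("queue long (%lu entries), forcing processing\n", q->qlen);
    }

    /* Rough analogue of the slimmed-down __call_rcu(): enqueue, account, delegate. */
    static void enqueue_cb(struct cb_queue *q, struct cb *c, void (*func)(struct cb *))
    {
            c->func = func;
            c->next = NULL;
            *q->tail = c;           /* append at the tail */
            q->tail = &c->next;
            q->qlen++;
            maybe_kick_core(q);     /* all conditional slow-path logic is delegated */
    }

    static void demo_func(struct cb *c) { (void)c; }

    int main(void)
    {
            struct cb_queue q = { .head = NULL, .tail = &q.head, .qlen = 0 };
            struct cb cbs[20];
            size_t i;

            for (i = 0; i < 20; i++)
                    enqueue_cb(&q, &cbs[i], demo_func);
            printf("enqueued %lu callbacks\n", q.qlen);
            return 0;
    }

The payoff of the split is the same in the sketch as in the commit: the common
call path stays a short straight line, while the rarely needed slow-path checks
can grow inside the core-processing helper without bloating it.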
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 90
1 file changed, 49 insertions(+), 41 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ceaa95923a87..70c4da7d2a97 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1861,45 +1861,12 @@ static void invoke_rcu_core(void)
 	raise_softirq(RCU_SOFTIRQ);
 }
 
-static void
-__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
-	   struct rcu_state *rsp, bool lazy)
+/*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+			    struct rcu_head *head, unsigned long flags)
 {
-	unsigned long flags;
-	struct rcu_data *rdp;
-
-	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
-	debug_rcu_head_queue(head);
-	head->func = func;
-	head->next = NULL;
-
-	smp_mb(); /* Ensure RCU update seen before callback registry. */
-
-	/*
-	 * Opportunistically note grace-period endings and beginnings.
-	 * Note that we might see a beginning right after we see an
-	 * end, but never vice versa, since this CPU has to pass through
-	 * a quiescent state betweentimes.
-	 */
-	local_irq_save(flags);
-	rdp = this_cpu_ptr(rsp->rda);
-
-	/* Add the callback to our list. */
-	ACCESS_ONCE(rdp->qlen)++;
-	if (lazy)
-		rdp->qlen_lazy++;
-	else
-		rcu_idle_count_callbacks_posted();
-	smp_mb(); /* Count before adding callback for rcu_barrier(). */
-	*rdp->nxttail[RCU_NEXT_TAIL] = head;
-	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
-
-	if (__is_kfree_rcu_offset((unsigned long)func))
-		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
-					 rdp->qlen_lazy, rdp->qlen);
-	else
-		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
-
 	/*
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
@@ -1908,10 +1875,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
-	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) {
-		local_irq_restore(flags);
+	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
 		return;
-	}
 
 	/*
 	 * Force the grace period if too many callbacks or too long waiting.
@@ -1944,6 +1909,49 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		}
 	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
 		force_quiescent_state(rsp, 1);
+}
+
+static void
+__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+	   struct rcu_state *rsp, bool lazy)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
+	debug_rcu_head_queue(head);
+	head->func = func;
+	head->next = NULL;
+
+	smp_mb(); /* Ensure RCU update seen before callback registry. */
+
+	/*
+	 * Opportunistically note grace-period endings and beginnings.
+	 * Note that we might see a beginning right after we see an
+	 * end, but never vice versa, since this CPU has to pass through
+	 * a quiescent state betweentimes.
+	 */
+	local_irq_save(flags);
+	rdp = this_cpu_ptr(rsp->rda);
+
+	/* Add the callback to our list. */
+	ACCESS_ONCE(rdp->qlen)++;
+	if (lazy)
+		rdp->qlen_lazy++;
+	else
+		rcu_idle_count_callbacks_posted();
+	smp_mb(); /* Count before adding callback for rcu_barrier(). */
+	*rdp->nxttail[RCU_NEXT_TAIL] = head;
+	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+
+	if (__is_kfree_rcu_offset((unsigned long)func))
+		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+					 rdp->qlen_lazy, rdp->qlen);
+	else
+		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
+
+	/* Go handle any RCU core processing required. */
+	__call_rcu_core(rsp, rdp, head, flags);
 	local_irq_restore(flags);
 }
 