author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2011-04-08 01:47:23 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2011-05-06 02:16:59 -0400
commit     2655d57ef35aa327a2e58a1c5dc7b65c65003f4e
tree       4a71e82b23ee84705cad15cd16490f8c5aace74f /kernel
parent     baa1ae0c9f1c618bc60706efa75fef3508bcee58
rcu: prevent call_rcu() from diving into rcu core if irqs disabled
This commit marks a first step towards making call_rcu() have
real-time behavior. If irqs are disabled, don't dive into the
RCU core. Later on, this new early exit will wake up the
per-CPU kthread, which must first be modified to handle the
cases involving callback storms.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
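The change itself is small: after the callback is queued and counted, __call_rcu() checks the interrupt state it saved earlier with local_irq_save(); if the caller already had irqs disabled, it restores the flags and returns instead of entering the RCU core (see the diff below). The following is a minimal, self-contained sketch of that idiom only, not the kernel code itself; do_rcu_core_work() is a hypothetical stand-in for the force-quiescent-state processing that the real function performs:

	#include <linux/irqflags.h>

	static void enqueue_example(void)
	{
		unsigned long flags;

		/* Safe to call whether or not the caller has irqs disabled. */
		local_irq_save(flags);

		/* ... enqueue the callback and bump the queue length ... */

		/* If interrupts were already disabled, don't dive into the core. */
		if (irqs_disabled_flags(flags)) {
			local_irq_restore(flags);
			return;
		}

		/* Otherwise, do the heavier processing before restoring flags. */
		do_rcu_core_work();	/* hypothetical stand-in */

		local_irq_restore(flags);
	}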
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index fe85600ba8c2..78923a50cdb2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1839,6 +1839,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	/* Add the callback to our list. */
 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+	rdp->qlen++;
+
+	/* If interrupts were disabled, don't dive into RCU core. */
+	if (irqs_disabled_flags(flags)) {
+		local_irq_restore(flags);
+		return;
+	}
 
 	/*
 	 * Force the grace period if too many callbacks or too long waiting.
@@ -1847,7 +1854,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * invoking force_quiescent_state() if the newly enqueued callback
 	 * is the only one waiting for a grace period to complete.
 	 */
-	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
 		/* Are we ignoring a completed grace period? */
 		rcu_process_gp_end(rsp, rdp);