author    Eric Dumazet <edumazet@google.com>    2017-03-01 17:28:39 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-03-22 07:43:32 -0400
commit    3d87dce3dfd665a892d107b797e68697204c3e43 (patch)
tree      e0110d523117321e125396228a462f03c3df0806
parent    62fe0521fb6cc3b887fdc00ddca9c38d2614b8b0 (diff)
net: net_enable_timestamp() can be called from irq contexts
[ Upstream commit 13baa00ad01bb3a9f893e3a08cbc2d072fc0c15d ]

It is now very clear that silly TCP listeners might play with
enabling/disabling timestamping while new children are added to their
accept queue. This means net_enable_timestamp() can be called from BH
context while the current state of the static key is not enabled.

Let's play safe and allow all contexts.

The work queue is scheduled only for the problematic cases, namely the
static key enable/disable transitions, so that critical paths are not
slowed down.

This extends and improves what we did in commit 5fa8bbda38c6 ("net: use
a work queue to defer net_disable_timestamp() work")

Fixes: b90e5794c5bd ("net: dont call jump_label_dec from irq context")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  net/core/dev.c | 35
1 file changed, 31 insertions, 4 deletions
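The pattern the patch relies on can be read directly from the diff below: a
lock-free cmpxchg loop adjusts a "wanted" count whenever no off<->on
transition of the static key is needed, and only the transition cases are
handed to a work queue that performs the expensive state change. As a rough
illustration of that idea outside the kernel, here is a minimal userspace C
sketch using C11 atomics. The names (wanted, deferred, slow_path_toggle,
take_ref, drop_ref) are invented for the example, and the synchronous
slow_path_toggle() call merely stands in for schedule_work()/netstamp_clear();
it is a sketch of the technique, not the patch itself.

/*
 * Minimal userspace sketch (NOT the kernel code) of the pattern used by
 * the patch: a cmpxchg fast path for reference counts that do not cross
 * the enabled/disabled boundary, and a deferred slow path for the ones
 * that do.  All names here are invented for the illustration.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int wanted;    /* how many users want the feature      */
static atomic_int deferred;  /* +/-1 deltas queued for the slow path */

/* stand-in for schedule_work() + netstamp_clear(): fold the deferred
 * deltas into 'wanted' and enable/disable the expensive feature */
static void slow_path_toggle(void)
{
	int d = atomic_exchange(&deferred, 0);
	int w = atomic_fetch_add(&wanted, d) + d;

	printf("slow path: feature %s\n", w > 0 ? "enabled" : "disabled");
}

static void take_ref(void)
{
	int w;

	/* fast path: bump the count only if it is already positive,
	 * i.e. no off->on transition is required */
	for (;;) {
		w = atomic_load(&wanted);
		if (w <= 0)
			break;
		if (atomic_compare_exchange_strong(&wanted, &w, w + 1))
			return;
	}
	/* transition needed: defer the expensive part */
	atomic_fetch_add(&deferred, 1);
	slow_path_toggle();
}

static void drop_ref(void)
{
	int w;

	/* fast path: drop the count only if it stays positive,
	 * i.e. no on->off transition is required */
	for (;;) {
		w = atomic_load(&wanted);
		if (w <= 1)
			break;
		if (atomic_compare_exchange_strong(&wanted, &w, w - 1))
			return;
	}
	atomic_fetch_sub(&deferred, 1);
	slow_path_toggle();
}

int main(void)
{
	take_ref();   /* 0 -> 1: slow path, feature enabled  */
	take_ref();   /* 1 -> 2: fast path                   */
	drop_ref();   /* 2 -> 1: fast path                   */
	drop_ref();   /* 1 -> 0: slow path, feature disabled */
	return 0;
}

The point of the two thresholds (<= 0 on enable, <= 1 on disable) is that only
the reference which crosses zero pays for the expensive state change; every
other caller succeeds with a single cmpxchg, which is what makes the fast path
safe from BH/irq context.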
diff --git a/net/core/dev.c b/net/core/dev.c
index 60b0a6049e72..2e04fd188081 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1697,27 +1697,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 static atomic_t netstamp_needed_deferred;
+static atomic_t netstamp_wanted;
 static void netstamp_clear(struct work_struct *work)
 {
 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+	int wanted;
 
-	while (deferred--)
-		static_key_slow_dec(&netstamp_needed);
+	wanted = atomic_add_return(deferred, &netstamp_wanted);
+	if (wanted > 0)
+		static_key_enable(&netstamp_needed);
+	else
+		static_key_disable(&netstamp_needed);
 }
 static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
 
 void net_enable_timestamp(void)
 {
+#ifdef HAVE_JUMP_LABEL
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 0)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
+			return;
+	}
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_inc(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	/* net_disable_timestamp() can be called from non process context */
-	atomic_inc(&netstamp_needed_deferred);
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 1)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
+			return;
+	}
+	atomic_dec(&netstamp_needed_deferred);
 	schedule_work(&netstamp_work);
 #else
 	static_key_slow_dec(&netstamp_needed);