Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c  |  8
-rw-r--r--  net/core/dev.c       | 35
-rw-r--r--  net/core/ethtool.c   | 11
-rw-r--r--  net/core/lwt_bpf.c   |  1
-rw-r--r--  net/core/lwtunnel.c  | 66
5 files changed, 89 insertions(+), 32 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 662bea587165..ea633342ab0d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-			unsigned int flags)
+			unsigned int flags,
+			void (*destructor)(struct sock *sk,
+					   struct sk_buff *skb))
 {
 	int err = 0;
 
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb == skb_peek(&sk->sk_receive_queue)) {
 		__skb_unlink(skb, &sk->sk_receive_queue);
 		atomic_dec(&skb->users);
+		if (destructor)
+			destructor(sk, skb);
 		err = 0;
 	}
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-	int err = __sk_queue_drop_skb(sk, skb, flags);
+	int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
 
 	kfree_skb(skb);
 	sk_mem_reclaim_partial(sk);
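The new destructor argument lets a protocol run per-skb cleanup while the receive-queue lock is still held; skb_kill_datagram() preserves the old behaviour by passing NULL. A minimal sketch of a caller that does supply one (udp_drop_dtor and its body are illustrative, not part of this patch):

	/* Illustrative destructor: runs under sk_receive_queue.lock,
	 * right after the skb has been unlinked.
	 */
	static void udp_drop_dtor(struct sock *sk, struct sk_buff *skb)
	{
		sk_mem_uncharge(sk, skb->truesize);	/* e.g. undo rmem charge */
	}

	/* in the protocol's drop path: */
	err = __sk_queue_drop_skb(sk, skb, flags, udp_drop_dtor);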
diff --git a/net/core/dev.c b/net/core/dev.c
index 07b307b0b414..29101c98399f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
 {
-#ifdef HAVE_JUMP_LABEL
 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 
-	if (deferred) {
-		while (--deferred)
-			static_key_slow_dec(&netstamp_needed);
-		return;
-	}
-#endif
+	while (deferred--)
+		static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
+#endif
+
+void net_enable_timestamp(void)
+{
 	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	if (in_interrupt()) {
-		atomic_inc(&netstamp_needed_deferred);
-		return;
-	}
-#endif
+	/* net_disable_timestamp() can be called from non process context */
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
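static_key_slow_dec() takes a mutex and may sleep, so it must run in process context. The old code only deferred when in_interrupt() was true, which misses other atomic contexts (the new comment above is explicit that callers may be in "non process context"). The rewrite therefore defers every decrement: callers just bump an atomic counter and kick a work item, and the workqueue drains the counter from process context. The pattern in isolation (illustrative names, assuming HAVE_JUMP_LABEL):

	static atomic_t pending;

	static void drain(struct work_struct *work)
	{
		int n = atomic_xchg(&pending, 0);	/* claim all requests */

		while (n--)
			static_key_slow_dec(&netstamp_needed);	/* may sleep */
	}
	static DECLARE_WORK(drain_work, drain);

	static void request_disable(void)
	{
		atomic_inc(&pending);
		schedule_work(&drain_work);	/* safe from any context */
	}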
@@ -2795,9 +2790,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, type)) {
 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-	} else if (illegal_highdma(skb->dev, skb)) {
-		features &= ~NETIF_F_SG;
 	}
+	if (illegal_highdma(skb->dev, skb))
+		features &= ~NETIF_F_SG;
 
 	return features;
 }
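Previously the else-if chain meant an skb that lost its checksum offloads was never tested with illegal_highdma(), so NETIF_F_SG could survive for a device that cannot DMA from high memory. The two conditions are independent and are now applied independently; a condensed view of the resulting logic (the first condition name is a placeholder):

	/* both masks can apply to the same skb */
	if (cannot_checksum)				/* placeholder */
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	if (illegal_highdma(skb->dev, skb))		/* no longer "else if" */
		features &= ~NETIF_F_SG;
	return features;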
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e23766c7e3ba..d92de0a1f0a4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1405,9 +1405,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vzalloc(reglen);
-	if (reglen && !regbuf)
-		return -ENOMEM;
+	regbuf = NULL;
+	if (reglen) {
+		regbuf = vzalloc(reglen);
+		if (!regbuf)
+			return -ENOMEM;
+	}
 
 	ops->get_regs(dev, &regs, regbuf);
 
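The rewrite makes the zero-length case explicit: when reglen is 0 the allocator is never called and regbuf stays deliberately NULL, instead of relying on what vzalloc(0) happens to return. The guard pattern in isolation (generic names):

	void *buf = NULL;		/* "no buffer" is a valid state */

	if (len) {
		buf = vzalloc(len);
		if (!buf)
			return -ENOMEM;
	}
	/* consumers must tolerate buf == NULL when len == 0 */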
@@ -1712,7 +1715,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
 						   void __user *useraddr)
 {
-	struct ethtool_channels channels, max;
+	struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
 	u32 max_rx_in_use = 0;
 
 	if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
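Without the initializer, max reached the driver's get_channels() handler with uninitialized stack contents in every field, including cmd. A designated initializer zeroes all members that are not named, so the struct now arrives zeroed except for .cmd. A standalone demonstration of that C rule (struct abbreviated; 0x3c is the value of ETHTOOL_GCHANNELS):

	#include <stdio.h>
	#include <stdint.h>

	struct chans {			/* stand-in for struct ethtool_channels */
		uint32_t cmd;
		uint32_t max_rx, max_tx, max_other, max_combined;
	};

	int main(void)
	{
		struct chans max = { .cmd = 0x3c };	/* rest is zeroed */

		printf("cmd=%#x max_rx=%u max_combined=%u\n",
		       max.cmd, max.max_rx, max.max_combined);
		return 0;
	}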
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 71bb3e2eca08..b3eef90b2df9 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
 	.fill_encap	= bpf_fill_encap_info,
 	.get_encap_size = bpf_encap_nlsize,
 	.cmp_encap	= bpf_encap_cmp,
+	.owner		= THIS_MODULE,
 };
 
 static int __init bpf_lwt_init(void)
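The new .owner field lets the lwtunnel core pin the encap module with try_module_get() for as long as a state built by these ops is alive (see the net/core/lwtunnel.c changes below). Every lwtunnel_encap_ops provider needs the same one-liner; a hedged sketch for a hypothetical encap module:

	/* Hypothetical provider: .owner is all the core needs to take a
	 * module reference per built state.
	 */
	static const struct lwtunnel_encap_ops my_encap_ops = {
		.build_state	= my_build_state,
		.fill_encap	= my_fill_encap_info,
		.get_encap_size	= my_encap_nlsize,
		.cmp_encap	= my_encap_cmp,
		.owner		= THIS_MODULE,
	};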
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index a5d4e866ce88..c23465005f2f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,6 +26,7 @@
 #include <net/lwtunnel.h>
 #include <net/rtnetlink.h>
 #include <net/ip6_fib.h>
+#include <net/nexthop.h>
 
 #ifdef CONFIG_MODULES
 
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 	ret = -EOPNOTSUPP;
 	rcu_read_lock();
 	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+		ret = ops->build_state(dev, encap, family, cfg, lws);
+		if (ret)
+			module_put(ops->owner);
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = -EINVAL;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return ret;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	rcu_read_unlock();
 #ifdef CONFIG_MODULES
 	if (!ops) {
 		const char *encap_type_str = lwtunnel_encap_str(encap_type);
 
 		if (encap_type_str) {
-			rcu_read_unlock();
+			__rtnl_unlock();
 			request_module("rtnl-lwt-%s", encap_type_str);
+			rtnl_lock();
+
 			rcu_read_lock();
 			ops = rcu_dereference(lwtun_encaps[encap_type]);
+			rcu_read_unlock();
 		}
 	}
 #endif
-	if (likely(ops && ops->build_state))
-		ret = ops->build_state(dev, encap, family, cfg, lws);
-	rcu_read_unlock();
-
-	return ret;
+	return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
+
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+	struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+	struct nlattr *nla_entype;
+	struct nlattr *attrs;
+	struct nlattr *nla;
+	u16 encap_type;
+	int attrlen;
+
+	while (rtnh_ok(rtnh, remaining)) {
+		attrlen = rtnh_attrlen(rtnh);
+		if (attrlen > 0) {
+			attrs = rtnh_attrs(rtnh);
+			nla = nla_find(attrs, attrlen, RTA_ENCAP);
+			nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+			if (nla_entype) {
+				encap_type = nla_get_u16(nla_entype);
+
+				if (lwtunnel_valid_encap_type(encap_type) != 0)
+					return -EOPNOTSUPP;
+			}
+		}
+		rtnh = rtnh_next(rtnh, &remaining);
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL(lwtunnel_build_state);
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
 
 void lwtstate_free(struct lwtunnel_state *lws)
 {
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
 	} else {
 		kfree(lws);
 	}
+	module_put(ops->owner);
 }
 EXPORT_SYMBOL(lwtstate_free);
 
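Taken together, the lwtunnel changes pin the encap module for the lifetime of each state it builds: lwtunnel_build_state() takes a reference with try_module_get() and drops it again on failure, and lwtstate_free() issues the matching module_put() when the state dies. A condensed caller's-eye sketch of the pairing (error handling trimmed; real callers release states via lwtstate_put(), which ends in lwtstate_free()):

	struct lwtunnel_state *lws;
	int err;

	err = lwtunnel_build_state(dev, encap_type, encap, family, cfg, &lws);
	if (err)
		return err;		/* no module reference left behind */

	/* ... the route holds lws; ops->owner cannot be unloaded ... */

	lwtstate_put(lws);		/* last put frees lws + module_put() */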