Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	255
1 file changed, 200 insertions, 55 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 8637b2b71f3d..96cf83da0d66 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -81,6 +81,7 @@
 #include <linux/hash.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -95,6 +96,7 @@
 #include <linux/notifier.h>
 #include <linux/skbuff.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/busy_poll.h>
@@ -1304,6 +1306,7 @@ void netdev_notify_peers(struct net_device *dev)
 {
 	rtnl_lock();
 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(netdev_notify_peers);
@@ -2449,6 +2452,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	unsigned long flags;
 
+	if (unlikely(!skb))
+		return;
+
 	if (likely(atomic_read(&skb->users) == 1)) {
 		smp_rmb();
 		atomic_set(&skb->users, 0);
@@ -2971,6 +2977,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	    __skb_linearize(skb))
 		goto out_kfree_skb;
 
+	if (validate_xmit_xfrm(skb, features))
+		goto out_kfree_skb;
+
 	/* If packet is not checksummed and device does not
 	 * support checksumming for this protocol, complete
 	 * checksumming here.
@@ -3440,6 +3449,7 @@ EXPORT_SYMBOL(netdev_max_backlog);
 
 int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
+unsigned int __read_mostly netdev_budget_usecs = 2000;
 int weight_p __read_mostly = 64;           /* old backlog weight */
 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
@@ -4226,7 +4236,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	int ret;
 
 	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
-		unsigned long pflags = current->flags;
+		unsigned int noreclaim_flag;
 
 		/*
 		 * PFMEMALLOC skbs are special, they should
@@ -4237,15 +4247,134 @@ static int __netif_receive_skb(struct sk_buff *skb)
 		 * Use PF_MEMALLOC as this saves us from propagating the allocation
 		 * context down to all allocation sites.
 		 */
-		current->flags |= PF_MEMALLOC;
+		noreclaim_flag = memalloc_noreclaim_save();
 		ret = __netif_receive_skb_core(skb, true);
-		tsk_restore_flags(current, pflags, PF_MEMALLOC);
+		memalloc_noreclaim_restore(noreclaim_flag);
 	} else
 		ret = __netif_receive_skb_core(skb, false);
 
 	return ret;
 }
 
+static struct static_key generic_xdp_needed __read_mostly;
+
+static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
+{
+	struct bpf_prog *new = xdp->prog;
+	int ret = 0;
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG: {
+		struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
+
+		rcu_assign_pointer(dev->xdp_prog, new);
+		if (old)
+			bpf_prog_put(old);
+
+		if (old && !new) {
+			static_key_slow_dec(&generic_xdp_needed);
+		} else if (new && !old) {
+			static_key_slow_inc(&generic_xdp_needed);
+			dev_disable_lro(dev);
+		}
+		break;
+	}
+
+	case XDP_QUERY_PROG:
+		xdp->prog_attached = !!rcu_access_pointer(dev->xdp_prog);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+				     struct bpf_prog *xdp_prog)
+{
+	struct xdp_buff xdp;
+	u32 act = XDP_DROP;
+	void *orig_data;
+	int hlen, off;
+	u32 mac_len;
+
+	/* Reinjected packets coming from act_mirred or similar should
+	 * not get XDP generic processing.
+	 */
+	if (skb_cloned(skb))
+		return XDP_PASS;
+
+	if (skb_linearize(skb))
+		goto do_drop;
+
+	/* The XDP program wants to see the packet starting at the MAC
+	 * header.
+	 */
+	mac_len = skb->data - skb_mac_header(skb);
+	hlen = skb_headlen(skb) + mac_len;
+	xdp.data = skb->data - mac_len;
+	xdp.data_end = xdp.data + hlen;
+	xdp.data_hard_start = skb->data - skb_headroom(skb);
+	orig_data = xdp.data;
+
+	act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+	off = xdp.data - orig_data;
+	if (off > 0)
+		__skb_pull(skb, off);
+	else if (off < 0)
+		__skb_push(skb, -off);
+
+	switch (act) {
+	case XDP_TX:
+		__skb_push(skb, mac_len);
+		/* fall through */
+	case XDP_PASS:
+		break;
+
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		/* fall through */
+	case XDP_ABORTED:
+		trace_xdp_exception(skb->dev, xdp_prog, act);
+		/* fall through */
+	case XDP_DROP:
+	do_drop:
+		kfree_skb(skb);
+		break;
+	}
+
+	return act;
+}
+
+/* When doing generic XDP we have to bypass the qdisc layer and the
+ * network taps in order to match in-driver-XDP behavior.
+ */
+static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
+{
+	struct net_device *dev = skb->dev;
+	struct netdev_queue *txq;
+	bool free_skb = true;
+	int cpu, rc;
+
+	txq = netdev_pick_tx(dev, skb, NULL);
+	cpu = smp_processor_id();
+	HARD_TX_LOCK(dev, txq, cpu);
+	if (!netif_xmit_stopped(txq)) {
+		rc = netdev_start_xmit(skb, dev, txq, 0);
+		if (dev_xmit_complete(rc))
+			free_skb = false;
+	}
+	HARD_TX_UNLOCK(dev, txq);
+	if (free_skb) {
+		trace_xdp_exception(dev, xdp_prog, XDP_TX);
+		kfree_skb(skb);
+	}
+}
+
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	int ret;
@@ -4257,6 +4386,21 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 
 	rcu_read_lock();
 
+	if (static_key_false(&generic_xdp_needed)) {
+		struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
+
+		if (xdp_prog) {
+			u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+
+			if (act != XDP_PASS) {
+				rcu_read_unlock();
+				if (act == XDP_TX)
+					generic_xdp_tx(skb, xdp_prog);
+				return NET_RX_DROP;
+			}
+		}
+	}
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -4489,7 +4633,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	enum gro_result ret;
 	int grow;
 
-	if (!(skb->dev->features & NETIF_F_GRO))
+	if (netif_elide_gro(skb->dev))
 		goto normal;
 
 	if (skb->csum_bad)
@@ -5059,27 +5203,28 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	do_softirq();
 }
 
-bool sk_busy_loop(struct sock *sk, int nonblock)
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg)
 {
-	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
-	int rc;
 
 restart:
-	rc = false;
 	napi_poll = NULL;
 
 	rcu_read_lock();
 
-	napi = napi_by_id(sk->sk_napi_id);
+	napi = napi_by_id(napi_id);
 	if (!napi)
 		goto out;
 
 	preempt_disable();
 	for (;;) {
-		rc = 0;
+		int work = 0;
+
 		local_bh_disable();
 		if (!napi_poll) {
 			unsigned long val = READ_ONCE(napi->state);
@@ -5097,16 +5242,15 @@ restart:
 			have_poll_lock = netpoll_poll_lock(napi);
 			napi_poll = napi->poll;
 		}
-		rc = napi_poll(napi, BUSY_POLL_BUDGET);
-		trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
+		work = napi_poll(napi, BUSY_POLL_BUDGET);
+		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 count:
-		if (rc > 0)
-			__NET_ADD_STATS(sock_net(sk),
-					LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		if (work > 0)
+			__NET_ADD_STATS(dev_net(napi->dev),
+					LINUX_MIB_BUSYPOLLRXPACKETS, work);
 		local_bh_enable();
 
-		if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-		    busy_loop_timeout(end_time))
+		if (!loop_end || loop_end(loop_end_arg, start_time))
 			break;
 
 		if (unlikely(need_resched())) {
@@ -5115,9 +5259,8 @@ count:
 			preempt_enable();
 			rcu_read_unlock();
 			cond_resched();
-			rc = !skb_queue_empty(&sk->sk_receive_queue);
-			if (rc || busy_loop_timeout(end_time))
-				return rc;
+			if (loop_end(loop_end_arg, start_time))
+				return;
 			goto restart;
 		}
 		cpu_relax();
@@ -5125,12 +5268,10 @@ count:
 	if (napi_poll)
 		busy_poll_stop(napi, have_poll_lock);
 	preempt_enable();
-	rc = !skb_queue_empty(&sk->sk_receive_queue);
 out:
 	rcu_read_unlock();
-	return rc;
 }
-EXPORT_SYMBOL(sk_busy_loop);
+EXPORT_SYMBOL(napi_busy_loop);
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
@@ -5142,10 +5283,10 @@ static void napi_hash_add(struct napi_struct *napi)
 
 	spin_lock(&napi_hash_lock);
 
-	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+	/* 0..NR_CPUS range is reserved for sender_cpu use */
 	do {
-		if (unlikely(++napi_gen_id < NR_CPUS + 1))
-			napi_gen_id = NR_CPUS + 1;
+		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
+			napi_gen_id = MIN_NAPI_ID;
 	} while (napi_by_id(napi_gen_id));
 	napi->napi_id = napi_gen_id;
 
@@ -5309,7 +5450,8 @@ out_unlock:
 static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-	unsigned long time_limit = jiffies + 2;
+	unsigned long time_limit = jiffies +
+		usecs_to_jiffies(netdev_budget_usecs);
 	int budget = netdev_budget;
 	LIST_HEAD(list);
 	LIST_HEAD(repoll);
@@ -6713,13 +6855,16 @@ EXPORT_SYMBOL(dev_change_proto_down);
 /**
  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
  *	@dev: device
+ *	@extack: netlink extended ack
  *	@fd: new program fd or negative value to clear
  *	@flags: xdp-related flags
  *
  *	Set or clear a bpf program for a device
  */
-int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
+int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
+		      int fd, u32 flags)
 {
+	int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct bpf_prog *prog = NULL;
 	struct netdev_xdp xdp;
@@ -6727,14 +6872,16 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
 
 	ASSERT_RTNL();
 
-	if (!ops->ndo_xdp)
-		return -EOPNOTSUPP;
+	xdp_op = ops->ndo_xdp;
+	if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
+		xdp_op = generic_xdp_install;
+
 	if (fd >= 0) {
 		if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) {
 			memset(&xdp, 0, sizeof(xdp));
 			xdp.command = XDP_QUERY_PROG;
 
-			err = ops->ndo_xdp(dev, &xdp);
+			err = xdp_op(dev, &xdp);
 			if (err < 0)
 				return err;
 			if (xdp.prog_attached)
@@ -6748,15 +6895,15 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
 
 	memset(&xdp, 0, sizeof(xdp));
 	xdp.command = XDP_SETUP_PROG;
+	xdp.extack = extack;
 	xdp.prog = prog;
 
-	err = ops->ndo_xdp(dev, &xdp);
+	err = xdp_op(dev, &xdp);
 	if (err < 0 && prog)
 		bpf_prog_put(prog);
 
 	return err;
 }
-EXPORT_SYMBOL(dev_change_xdp_fd);
 
 /**
  *	dev_new_index - allocate an ifindex
@@ -7102,13 +7249,10 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 	else
 		netif_dormant_off(dev);
 
-	if (netif_carrier_ok(rootdev)) {
-		if (!netif_carrier_ok(dev))
-			netif_carrier_on(dev);
-	} else {
-		if (netif_carrier_ok(dev))
-			netif_carrier_off(dev);
-	}
+	if (netif_carrier_ok(rootdev))
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
@@ -7121,12 +7265,10 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
 	BUG_ON(count < 1);
 
-	rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-	if (!rx) {
-		rx = vzalloc(sz);
-		if (!rx)
-			return -ENOMEM;
-	}
+	rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
+	if (!rx)
+		return -ENOMEM;
+
 	dev->_rx = rx;
 
 	for (i = 0; i < count; i++)
@@ -7163,12 +7305,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 	if (count < 1 || count > 0xffff)
 		return -EINVAL;
 
-	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-	if (!tx) {
-		tx = vzalloc(sz);
-		if (!tx)
-			return -ENOMEM;
-	}
+	tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
+	if (!tx)
+		return -ENOMEM;
+
 	dev->_tx = tx;
 
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
@@ -7702,9 +7842,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	/* ensure 32-byte alignment of whole construct */
 	alloc_size += NETDEV_ALIGN - 1;
 
-	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-	if (!p)
-		p = vzalloc(alloc_size);
+	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
 	if (!p)
 		return NULL;
 
@@ -7791,6 +7929,7 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
 void free_netdev(struct net_device *dev)
 {
 	struct napi_struct *p, *n;
+	struct bpf_prog *prog;
 
 	might_sleep();
 	netif_free_tx_queues(dev);
@@ -7809,6 +7948,12 @@ void free_netdev(struct net_device *dev)
 	free_percpu(dev->pcpu_refcnt);
 	dev->pcpu_refcnt = NULL;
 
+	prog = rcu_dereference_protected(dev->xdp_prog, 1);
+	if (prog) {
+		bpf_prog_put(prog);
+		static_key_slow_dec(&generic_xdp_needed);
+	}
+
 	/* Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		netdev_freemem(dev);