Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c           | 17
-rw-r--r--  net/core/filter.c        | 12
-rw-r--r--  net/core/lwt_bpf.c       |  2
-rw-r--r--  net/core/xdp.c           |  3
-rw-r--r--  net/dsa/slave.c          |  6
-rw-r--r--  net/ipv4/fib_frontend.c  |  4
-rw-r--r--  net/ipv4/igmp.c          |  3
-rw-r--r--  net/ipv4/inet_fragment.c |  6
-rw-r--r--  net/ipv4/ip_fragment.c   |  5
-rw-r--r--  net/ipv4/tcp_bbr.c       |  4
-rw-r--r--  net/ipv4/tcp_input.c     |  9
-rw-r--r--  net/ipv6/esp6.c          |  4
-rw-r--r--  net/ipv6/ip6_vti.c       | 11
-rw-r--r--  net/l2tp/l2tp_ppp.c      | 13
-rw-r--r--  net/netlink/af_netlink.c |  7
-rw-r--r--  net/openvswitch/meter.c  | 10
-rw-r--r--  net/rds/ib_frmr.c        |  5
-rw-r--r--  net/rds/ib_mr.h          |  3
-rw-r--r--  net/rds/ib_rdma.c        | 21
-rw-r--r--  net/rds/rdma.c           | 13
-rw-r--r--  net/rds/rds.h            |  5
-rw-r--r--  net/rds/send.c           | 12
-rw-r--r--  net/rxrpc/call_accept.c  |  4
-rw-r--r--  net/smc/smc_cdc.c        |  3
-rw-r--r--  net/socket.c             |  5
-rw-r--r--  net/xdp/xsk.c            |  4
-rw-r--r--  net/xdp/xsk_queue.h      |  2
-rw-r--r--  net/xfrm/xfrm_policy.c   |  3
-rw-r--r--  net/xfrm/xfrm_user.c     | 18
29 files changed, 143 insertions, 71 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index a5aa1c7444e6..559a91271f82 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7149,16 +7149,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
 		dev->tx_queue_len = new_len;
 		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
 		res = notifier_to_errno(res);
-		if (res) {
-			netdev_err(dev,
-				   "refused to change device tx_queue_len\n");
-			dev->tx_queue_len = orig_len;
-			return res;
-		}
-		return dev_qdisc_change_tx_queue_len(dev);
+		if (res)
+			goto err_rollback;
+		res = dev_qdisc_change_tx_queue_len(dev);
+		if (res)
+			goto err_rollback;
 	}
 
 	return 0;
+
+err_rollback:
+	netdev_err(dev, "refused to change device tx_queue_len\n");
+	dev->tx_queue_len = orig_len;
+	return res;
 }
 
 /**
diff --git a/net/core/filter.c b/net/core/filter.c
index 06da770f543f..9dfd145eedcc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1712,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
 	   u32, offset, void *, to, u32, len, u32, start_header)
 {
+	u8 *end = skb_tail_pointer(skb);
+	u8 *net = skb_network_header(skb);
+	u8 *mac = skb_mac_header(skb);
 	u8 *ptr;
 
-	if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
+	if (unlikely(offset > 0xffff || len > (end - mac)))
 		goto err_clear;
 
 	switch (start_header) {
 	case BPF_HDR_START_MAC:
-		ptr = skb_mac_header(skb) + offset;
+		ptr = mac + offset;
 		break;
 	case BPF_HDR_START_NET:
-		ptr = skb_network_header(skb) + offset;
+		ptr = net + offset;
 		break;
 	default:
 		goto err_clear;
 	}
 
-	if (likely(ptr >= skb_mac_header(skb) &&
-		   ptr + len <= skb_tail_pointer(skb))) {
+	if (likely(ptr >= mac && ptr + len <= end)) {
 		memcpy(to, ptr, len);
 		return 0;
 	}
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index e7e626fb87bb..e45098593dc0 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
 	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
 		return -EINVAL;
 
-	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL);
+	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
 	if (!prog->name)
 		return -ENOMEM;
 
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 9d1f22072d5d..6771f1855b96 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -345,7 +345,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		rcu_read_lock();
 		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
-		xa->zc_alloc->free(xa->zc_alloc, handle);
+		if (!WARN_ON_ONCE(!xa))
+			xa->zc_alloc->free(xa->zc_alloc, handle);
 		rcu_read_unlock();
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1e3b6a6d8a40..732369c80644 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
 
+	if (!netif_running(slave_dev))
+		return 0;
+
 	netif_device_detach(slave_dev);
 
 	rtnl_lock();
@@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
 {
 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
 
+	if (!netif_running(slave_dev))
+		return 0;
+
 	netif_device_attach(slave_dev);
 
 	rtnl_lock();
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index e46cdd310e5f..2998b0e47d4b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -292,19 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
 		return ip_hdr(skb)->daddr;
 
 	in_dev = __in_dev_get_rcu(dev);
-	BUG_ON(!in_dev);
 
 	net = dev_net(dev);
 
 	scope = RT_SCOPE_UNIVERSE;
 	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+		bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
 		struct flowi4 fl4 = {
 			.flowi4_iif = LOOPBACK_IFINDEX,
 			.flowi4_oif = l3mdev_master_ifindex_rcu(dev),
 			.daddr = ip_hdr(skb)->saddr,
 			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
 			.flowi4_scope = scope,
-			.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
+			.flowi4_mark = vmark ? skb->mark : 0,
 		};
 		if (!fib_lookup(net, &fl4, &res, 0))
 			return FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 28fef7d15959..75151be21413 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1387,7 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
 /*
  * A socket has joined a multicast group on device dev.
  */
-void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode)
+static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
+			      unsigned int mode)
 {
 	struct ip_mc_list *im;
 #ifdef CONFIG_IP_MULTICAST
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 1e4cf3ab560f..0d70608cc2e1 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -157,9 +157,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 {
 	struct inet_frag_queue *q;
 
-	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
-		return NULL;
-
 	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
 	if (!q)
 		return NULL;
@@ -204,6 +201,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
 	struct inet_frag_queue *fq;
 
+	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+		return NULL;
+
 	rcu_read_lock();
 
 	fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8e9528ebaa8e..d14d741fb05e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -383,11 +383,16 @@ found:
 		int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
 
 		if (i < next->len) {
+			int delta = -next->truesize;
+
 			/* Eat head of the next overlapped fragment
 			 * and leave the loop. The next ones cannot overlap.
 			 */
 			if (!pskb_pull(next, i))
 				goto err;
+			delta += next->truesize;
+			if (delta)
+				add_frag_mem_limit(qp->q.net, delta);
 			next->ip_defrag_offset += i;
 			qp->q.meat -= i;
 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 58e2f479ffb4..4bfff3c87e8e 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -354,6 +354,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
 	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
 	cwnd = (cwnd + 1) & ~1U;
 
+	/* Ensure gain cycling gets inflight above BDP even for small BDPs. */
+	if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
+		cwnd += 2;
+
 	return cwnd;
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3bcd30a2ba06..f9dcb29be12d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -246,8 +246,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
 
 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-	if (tcp_hdr(skb)->cwr)
+	if (tcp_hdr(skb)->cwr) {
 		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+
+		/* If the sender is telling us it has entered CWR, then its
+		 * cwnd may be very low (even just 1 packet), so we should ACK
+		 * immediately.
+		 */
+		tcp_enter_quickack_mode((struct sock *)tp, 2);
+	}
 }
 
 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 97513f35bcc5..88a7579c23bd 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -669,8 +669,10 @@ skip_cow:
 
 	sg_init_table(sg, nfrags);
 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
-	if (unlikely(ret < 0))
+	if (unlikely(ret < 0)) {
+		kfree(tmp);
 		goto out;
+	}
 
 	skb->ip_summed = CHECKSUM_NONE;
 
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b7f28deddaea..c72ae3a4fe09 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 		goto tx_err_dst_release;
 	}
 
-	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
-	skb_dst_set(skb, dst);
-	skb->dev = skb_dst(skb)->dev;
-
 	mtu = dst_mtu(dst);
 	if (!skb->ignore_df && skb->len > mtu) {
 		skb_dst_update_pmtu(skb, mtu);
@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 				  htonl(mtu));
 		}
 
-		return -EMSGSIZE;
+		err = -EMSGSIZE;
+		goto tx_err_dst_release;
 	}
 
+	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+	skb_dst_set(skb, dst);
+	skb->dev = skb_dst(skb)->dev;
+
 	err = dst_output(t->net, skb->sk, skb);
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index e398797878a9..cf6cca260e7b 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1201,13 +1201,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
 			l2tp_session_get(sock_net(sk), tunnel,
 					 stats.session_id);
 
-		if (session && session->pwtype == L2TP_PWTYPE_PPP) {
-			err = pppol2tp_session_ioctl(session, cmd,
-						     arg);
+		if (!session) {
+			err = -EBADR;
+			break;
+		}
+		if (session->pwtype != L2TP_PWTYPE_PPP) {
 			l2tp_session_dec_refcount(session);
-		} else {
 			err = -EBADR;
+			break;
 		}
+
+		err = pppol2tp_session_ioctl(session, cmd, arg);
+		l2tp_session_dec_refcount(session);
 		break;
 	}
 #ifdef CONFIG_XFRM
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 393573a99a5a..56704d95f82d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -63,6 +63,7 @@
 #include <linux/hash.h>
 #include <linux/genetlink.h>
 #include <linux/net_namespace.h>
+#include <linux/nospec.h>
 
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 
 	if (protocol < 0 || protocol >= MAX_LINKS)
 		return -EPROTONOSUPPORT;
+	protocol = array_index_nospec(protocol, MAX_LINKS);
 
 	netlink_lock_table();
 #ifdef CONFIG_MODULES
@@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			return err;
 	}
 
+	if (nlk->ngroups == 0)
+		groups = 0;
+	else if (nlk->ngroups < 8*sizeof(groups))
+		groups &= (1UL << nlk->ngroups) - 1;
+
 	bound = nlk->bound;
 	if (bound) {
 		/* Ensure nlk->portid is up-to-date. */
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index b891a91577f8..c038e021a591 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
 	if (!meter)
 		return ERR_PTR(-ENOMEM);
 
+	meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]);
 	meter->used = div_u64(ktime_get_ns(), 1000 * 1000);
 	meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0;
 	meter->keep_stats = !a[OVS_METER_ATTR_CLEAR];
@@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	u32 meter_id;
 	bool failed;
 
+	if (!a[OVS_METER_ATTR_ID]) {
+		return -ENODEV;
+	}
+
 	meter = dp_meter_create(a);
 	if (IS_ERR_OR_NULL(meter))
 		return PTR_ERR(meter);
@@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
 		goto exit_unlock;
 	}
 
-	if (!a[OVS_METER_ATTR_ID]) {
-		err = -ENODEV;
-		goto exit_unlock;
-	}
-
 	meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
 
 	/* Cannot fail after this. */
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 48332a6ed738..d152e48ea371 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
 	struct rds_ib_frmr *frmr;
 	int ret;
 
+	if (!ic) {
+		/* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
 	do {
 		if (ibmr)
 			rds_ib_free_frmr(ibmr, true);
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 0ea4ab017a8c..655f01d427fe 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -115,7 +115,8 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
 			struct rds_info_rdma_connection *iinfo);
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-		    struct rds_sock *rs, u32 *key_ret);
+		    struct rds_sock *rs, u32 *key_ret,
+		    struct rds_connection *conn);
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index e678699268a2..2e49a40a5e11 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -537,11 +537,12 @@ void rds_ib_flush_mrs(void)
 }
 
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
-		    struct rds_sock *rs, u32 *key_ret)
+		    struct rds_sock *rs, u32 *key_ret,
+		    struct rds_connection *conn)
 {
 	struct rds_ib_device *rds_ibdev;
 	struct rds_ib_mr *ibmr = NULL;
-	struct rds_ib_connection *ic = rs->rs_conn->c_transport_data;
+	struct rds_ib_connection *ic = NULL;
 	int ret;
 
 	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
@@ -550,6 +551,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 		goto out;
 	}
 
+	if (conn)
+		ic = conn->c_transport_data;
+
 	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
 		ret = -ENODEV;
 		goto out;
@@ -559,17 +563,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
 	else
 		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
-	if (ibmr)
-		rds_ibdev = NULL;
-
-out:
-	if (!ibmr)
+	if (IS_ERR(ibmr)) {
+		ret = PTR_ERR(ibmr);
 		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
+	} else {
+		return ibmr;
+	}
 
+out:
 	if (rds_ibdev)
 		rds_ib_dev_put(rds_ibdev);
 
-	return ibmr;
+	return ERR_PTR(ret);
 }
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 634cfcb7bba6..80920e47f2c7 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
 }
 
 static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
-			  u64 *cookie_ret, struct rds_mr **mr_ret)
+			  u64 *cookie_ret, struct rds_mr **mr_ret,
+			  struct rds_conn_path *cp)
 {
 	struct rds_mr *mr = NULL, *found;
 	unsigned int nr_pages;
@@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 	 * Note that dma_map() implies that pending writes are
 	 * flushed to RAM, so no dma_sync is needed here. */
 	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
-						 &mr->r_key);
+						 &mr->r_key,
+						 cp ? cp->cp_conn : NULL);
 
 	if (IS_ERR(trans_private)) {
 		for (i = 0 ; i < nents; i++)
@@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
 			   sizeof(struct rds_get_mr_args)))
 		return -EFAULT;
 
-	return __rds_rdma_map(rs, &args, NULL, NULL);
+	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
 }
 
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
@@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
 	new_args.cookie_addr = args.cookie_addr;
 	new_args.flags = args.flags;
 
-	return __rds_rdma_map(rs, &new_args, NULL, NULL);
+	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
 }
 
 /*
@@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;
 
-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
+			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
 }
 
 /*
diff --git a/net/rds/rds.h b/net/rds/rds.h
index f2272fb8cd45..60b3b787fbdb 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -464,6 +464,8 @@ struct rds_message {
 			struct scatterlist *op_sg;
 		} data;
 	};
+
+	struct rds_conn_path *m_conn_path;
 };
 
 /*
@@ -544,7 +546,8 @@ struct rds_transport {
 				unsigned int avail);
 	void (*exit)(void);
 	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
-			struct rds_sock *rs, u32 *key_ret);
+			struct rds_sock *rs, u32 *key_ret,
+			struct rds_connection *conn);
 	void (*sync_mr)(void *trans_private, int direction);
 	void (*free_mr)(void *trans_private, int invalidate);
 	void (*flush_mrs)(void);
diff --git a/net/rds/send.c b/net/rds/send.c
index 94c7f74909be..59f17a2335f4 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1169,6 +1169,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 		rs->rs_conn = conn;
 	}
 
+	if (conn->c_trans->t_mp_capable)
+		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+	else
+		cpath = &conn->c_path[0];
+
+	rm->m_conn_path = cpath;
+
 	/* Parse any control messages the user may have included. */
 	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
 	if (ret) {
@@ -1192,11 +1199,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 		goto out;
 	}
 
-	if (conn->c_trans->t_mp_capable)
-		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
-	else
-		cpath = &conn->c_path[0];
-
 	if (rds_destroy_pending(conn)) {
 		ret = -EAGAIN;
 		goto out;
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index a9a9be5519b9..9d1e298b784c 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 		while (*pp) {
 			parent = *pp;
 			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
-			if (user_call_ID < call->user_call_ID)
+			if (user_call_ID < xcall->user_call_ID)
 				pp = &(*pp)->rb_left;
-			else if (user_call_ID > call->user_call_ID)
+			else if (user_call_ID > xcall->user_call_ID)
 				pp = &(*pp)->rb_right;
 			else
 				goto id_in_use;
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index a7e8d63fc8ae..9bde1e4ca288 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -233,7 +233,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		/* force immediate tx of current consumer cursor, but
 		 * under send_lock to guarantee arrival in seqno-order
 		 */
-		smc_tx_sndbuf_nonempty(conn);
+		if (smc->sk.sk_state != SMC_INIT)
+			smc_tx_sndbuf_nonempty(conn);
 	}
 }
 
diff --git a/net/socket.c b/net/socket.c
index 85633622c94d..8c24d5dc4bc8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -2522,6 +2523,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 
 	if (call < 1 || call > SYS_SENDMMSG)
 		return -EINVAL;
+	call = array_index_nospec(call, SYS_SENDMMSG + 1);
 
 	len = nargs[call];
 	if (len > sizeof(a))
@@ -2688,7 +2690,8 @@ EXPORT_SYMBOL(sock_unregister);
 
 bool sock_is_registered(int family)
 {
-	return family < NPROTO && rcu_access_pointer(net_families[family]);
+	return family < NPROTO &&
+		rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]);
 }
 
 static int __init sock_init(void)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 72335c2e8108..4e937cd7c17d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
 
-	if (err) {
-		xdp_return_buff(xdp);
+	if (err)
 		xs->rx_dropped++;
-	}
 
 	return err;
 }
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 52ecaf770642..8a64b150be54 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -250,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
 
 static inline bool xskq_empty_desc(struct xsk_queue *q)
 {
-	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
+	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
 }
 
 void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5f48251c1319..7c5e8978aeaa 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2286,6 +2286,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
 
+	if (IS_ERR(dst))
+		dst_release(dst_orig);
+
 	return dst;
 }
 EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 080035f056d9..33878e6e0d0a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1025,10 +1025,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
 {
 	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
 
-	if (nlsk)
-		return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
-	else
-		return -1;
+	if (!nlsk) {
+		kfree_skb(skb);
+		return -EPIPE;
+	}
+
+	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
 }
 
 static inline unsigned int xfrm_spdinfo_msgsize(void)
@@ -1671,9 +1673,11 @@ static inline unsigned int userpolicy_type_attrsize(void)
 #ifdef CONFIG_XFRM_SUB_POLICY
 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
 {
-	struct xfrm_userpolicy_type upt = {
-		.type = type,
-	};
+	struct xfrm_userpolicy_type upt;
+
+	/* Sadly there are two holes in struct xfrm_userpolicy_type */
+	memset(&upt, 0, sizeof(upt));
+	upt.type = type;
 
 	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
 }
