about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c7
-rw-r--r--net/core/filter.c48
-rw-r--r--net/core/lwt_bpf.c1
-rw-r--r--net/core/neighbour.c15
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/core/skmsg.c3
-rw-r--r--net/core/sock.c2
7 files changed, 50 insertions, 37 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 82f20022259d..5d03889502eb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
8152 netdev_features_t feature; 8152 netdev_features_t feature;
8153 int feature_bit; 8153 int feature_bit;
8154 8154
8155 for_each_netdev_feature(&upper_disables, feature_bit) { 8155 for_each_netdev_feature(upper_disables, feature_bit) {
8156 feature = __NETIF_F_BIT(feature_bit); 8156 feature = __NETIF_F_BIT(feature_bit);
8157 if (!(upper->wanted_features & feature) 8157 if (!(upper->wanted_features & feature)
8158 && (features & feature)) { 8158 && (features & feature)) {
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
8172 netdev_features_t feature; 8172 netdev_features_t feature;
8173 int feature_bit; 8173 int feature_bit;
8174 8174
8175 for_each_netdev_feature(&upper_disables, feature_bit) { 8175 for_each_netdev_feature(upper_disables, feature_bit) {
8176 feature = __NETIF_F_BIT(feature_bit); 8176 feature = __NETIF_F_BIT(feature_bit);
8177 if (!(features & feature) && (lower->features & feature)) { 8177 if (!(features & feature) && (lower->features & feature)) {
8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", 8178 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -8712,6 +8712,9 @@ int init_dummy_netdev(struct net_device *dev)
8712 set_bit(__LINK_STATE_PRESENT, &dev->state); 8712 set_bit(__LINK_STATE_PRESENT, &dev->state);
8713 set_bit(__LINK_STATE_START, &dev->state); 8713 set_bit(__LINK_STATE_START, &dev->state);
8714 8714
8715 /* napi_busy_loop stats accounting wants this */
8716 dev_net_set(dev, &init_net);
8717
8715 /* Note : We dont allocate pcpu_refcnt for dummy devices, 8718 /* Note : We dont allocate pcpu_refcnt for dummy devices,
8716 * because users of this 'device' dont need to change 8719 * because users of this 'device' dont need to change
8717 * its refcount. 8720 * its refcount.
diff --git a/net/core/filter.c b/net/core/filter.c
index 447dd1bad31f..f7d0004fc160 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2020static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2020static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2021 u32 flags) 2021 u32 flags)
2022{ 2022{
2023 /* skb->mac_len is not set on normal egress */ 2023 unsigned int mlen = skb_network_offset(skb);
2024 unsigned int mlen = skb->network_header - skb->mac_header;
2025 2024
2026 __skb_pull(skb, mlen); 2025 if (mlen) {
2026 __skb_pull(skb, mlen);
2027 2027
2028 /* At ingress, the mac header has already been pulled once. 2028 /* At ingress, the mac header has already been pulled once.
2029 * At egress, skb_pospull_rcsum has to be done in case that 2029 * At egress, skb_pospull_rcsum has to be done in case that
2030 * the skb is originated from ingress (i.e. a forwarded skb) 2030 * the skb is originated from ingress (i.e. a forwarded skb)
2031 * to ensure that rcsum starts at net header. 2031 * to ensure that rcsum starts at net header.
2032 */ 2032 */
2033 if (!skb_at_tc_ingress(skb)) 2033 if (!skb_at_tc_ingress(skb))
2034 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2034 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2035 }
2035 skb_pop_mac_header(skb); 2036 skb_pop_mac_header(skb);
2036 skb_reset_mac_len(skb); 2037 skb_reset_mac_len(skb);
2037 return flags & BPF_F_INGRESS ? 2038 return flags & BPF_F_INGRESS ?
@@ -2788,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2788 u32 off = skb_mac_header_len(skb); 2789 u32 off = skb_mac_header_len(skb);
2789 int ret; 2790 int ret;
2790 2791
2791 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2792 if (!skb_is_gso_tcp(skb))
2792 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2793 return -ENOTSUPP; 2793 return -ENOTSUPP;
2794 2794
2795 ret = skb_cow(skb, len_diff); 2795 ret = skb_cow(skb, len_diff);
@@ -2830,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2830 u32 off = skb_mac_header_len(skb); 2830 u32 off = skb_mac_header_len(skb);
2831 int ret; 2831 int ret;
2832 2832
2833 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2833 if (!skb_is_gso_tcp(skb))
2834 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2835 return -ENOTSUPP; 2834 return -ENOTSUPP;
2836 2835
2837 ret = skb_unclone(skb, GFP_ATOMIC); 2836 ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2956,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2956 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2955 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2957 int ret; 2956 int ret;
2958 2957
2959 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2958 if (!skb_is_gso_tcp(skb))
2960 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2961 return -ENOTSUPP; 2959 return -ENOTSUPP;
2962 2960
2963 ret = skb_cow(skb, len_diff); 2961 ret = skb_cow(skb, len_diff);
@@ -2986,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2986 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2984 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2987 int ret; 2985 int ret;
2988 2986
2989 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ 2987 if (!skb_is_gso_tcp(skb))
2990 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2991 return -ENOTSUPP; 2988 return -ENOTSUPP;
2992 2989
2993 ret = skb_unclone(skb, GFP_ATOMIC); 2990 ret = skb_unclone(skb, GFP_ATOMIC);
@@ -4111,14 +4108,20 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4111 /* Only some socketops are supported */ 4108 /* Only some socketops are supported */
4112 switch (optname) { 4109 switch (optname) {
4113 case SO_RCVBUF: 4110 case SO_RCVBUF:
4111 val = min_t(u32, val, sysctl_rmem_max);
4114 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 4112 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4115 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); 4113 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4116 break; 4114 break;
4117 case SO_SNDBUF: 4115 case SO_SNDBUF:
4116 val = min_t(u32, val, sysctl_wmem_max);
4118 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 4117 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4119 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 4118 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4120 break; 4119 break;
4121 case SO_MAX_PACING_RATE: /* 32bit version */ 4120 case SO_MAX_PACING_RATE: /* 32bit version */
4121 if (val != ~0U)
4122 cmpxchg(&sk->sk_pacing_status,
4123 SK_PACING_NONE,
4124 SK_PACING_NEEDED);
4122 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; 4125 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4123 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 4126 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4124 sk->sk_max_pacing_rate); 4127 sk->sk_max_pacing_rate);
@@ -4132,7 +4135,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4132 sk->sk_rcvlowat = val ? : 1; 4135 sk->sk_rcvlowat = val ? : 1;
4133 break; 4136 break;
4134 case SO_MARK: 4137 case SO_MARK:
4135 sk->sk_mark = val; 4138 if (sk->sk_mark != val) {
4139 sk->sk_mark = val;
4140 sk_dst_reset(sk);
4141 }
4136 break; 4142 break;
4137 default: 4143 default:
4138 ret = -EINVAL; 4144 ret = -EINVAL;
@@ -4203,7 +4209,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4203 /* Only some options are supported */ 4209 /* Only some options are supported */
4204 switch (optname) { 4210 switch (optname) {
4205 case TCP_BPF_IW: 4211 case TCP_BPF_IW:
4206 if (val <= 0 || tp->data_segs_out > 0) 4212 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4207 ret = -EINVAL; 4213 ret = -EINVAL;
4208 else 4214 else
4209 tp->snd_cwnd = val; 4215 tp->snd_cwnd = val;
@@ -5309,7 +5315,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
5309 case BPF_FUNC_trace_printk: 5315 case BPF_FUNC_trace_printk:
5310 if (capable(CAP_SYS_ADMIN)) 5316 if (capable(CAP_SYS_ADMIN))
5311 return bpf_get_trace_printk_proto(); 5317 return bpf_get_trace_printk_proto();
5312 /* else: fall through */ 5318 /* else, fall through */
5313 default: 5319 default:
5314 return NULL; 5320 return NULL;
5315 } 5321 }
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..a648568c5e8f 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
63 lwt->name ? : "<unknown>"); 63 lwt->name ? : "<unknown>");
64 ret = BPF_OK; 64 ret = BPF_OK;
65 } else { 65 } else {
66 skb_reset_mac_header(skb);
66 ret = skb_do_redirect(skb); 67 ret = skb_do_redirect(skb);
67 if (ret == 0) 68 if (ret == 0)
68 ret = BPF_REDIRECT; 69 ret = BPF_REDIRECT;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 763a7b08df67..4230400b9a30 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -18,6 +18,7 @@
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 19
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/kmemleak.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -443,12 +444,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
443 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 444 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
444 if (!ret) 445 if (!ret)
445 return NULL; 446 return NULL;
446 if (size <= PAGE_SIZE) 447 if (size <= PAGE_SIZE) {
447 buckets = kzalloc(size, GFP_ATOMIC); 448 buckets = kzalloc(size, GFP_ATOMIC);
448 else 449 } else {
449 buckets = (struct neighbour __rcu **) 450 buckets = (struct neighbour __rcu **)
450 __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 451 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
451 get_order(size)); 452 get_order(size));
453 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
454 }
452 if (!buckets) { 455 if (!buckets) {
453 kfree(ret); 456 kfree(ret);
454 return NULL; 457 return NULL;
@@ -468,10 +471,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
468 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); 471 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
469 struct neighbour __rcu **buckets = nht->hash_buckets; 472 struct neighbour __rcu **buckets = nht->hash_buckets;
470 473
471 if (size <= PAGE_SIZE) 474 if (size <= PAGE_SIZE) {
472 kfree(buckets); 475 kfree(buckets);
473 else 476 } else {
477 kmemleak_free(buckets);
474 free_pages((unsigned long)buckets, get_order(size)); 478 free_pages((unsigned long)buckets, get_order(size));
479 }
475 kfree(nht); 480 kfree(nht);
476} 481}
477 482
@@ -1002,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh)
1002 if (neigh->ops->solicit) 1007 if (neigh->ops->solicit)
1003 neigh->ops->solicit(neigh, skb); 1008 neigh->ops->solicit(neigh, skb);
1004 atomic_inc(&neigh->probes); 1009 atomic_inc(&neigh->probes);
1005 kfree_skb(skb); 1010 consume_skb(skb);
1006} 1011}
1007 1012
1008/* Called when a timer expires for a neighbour entry. */ 1013/* Called when a timer expires for a neighbour entry. */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 37317ffec146..2415d9cb9b89 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
356 */ 356 */
357void *netdev_alloc_frag(unsigned int fragsz) 357void *netdev_alloc_frag(unsigned int fragsz)
358{ 358{
359 fragsz = SKB_DATA_ALIGN(fragsz);
360
359 return __netdev_alloc_frag(fragsz, GFP_ATOMIC); 361 return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
360} 362}
361EXPORT_SYMBOL(netdev_alloc_frag); 363EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
369 371
370void *napi_alloc_frag(unsigned int fragsz) 372void *napi_alloc_frag(unsigned int fragsz)
371{ 373{
374 fragsz = SKB_DATA_ALIGN(fragsz);
375
372 return __napi_alloc_frag(fragsz, GFP_ATOMIC); 376 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
373} 377}
374EXPORT_SYMBOL(napi_alloc_frag); 378EXPORT_SYMBOL(napi_alloc_frag);
@@ -5270,7 +5274,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5270 unsigned long chunk; 5274 unsigned long chunk;
5271 struct sk_buff *skb; 5275 struct sk_buff *skb;
5272 struct page *page; 5276 struct page *page;
5273 gfp_t gfp_head;
5274 int i; 5277 int i;
5275 5278
5276 *errcode = -EMSGSIZE; 5279 *errcode = -EMSGSIZE;
@@ -5280,12 +5283,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5280 if (npages > MAX_SKB_FRAGS) 5283 if (npages > MAX_SKB_FRAGS)
5281 return NULL; 5284 return NULL;
5282 5285
5283 gfp_head = gfp_mask;
5284 if (gfp_head & __GFP_DIRECT_RECLAIM)
5285 gfp_head |= __GFP_RETRY_MAYFAIL;
5286
5287 *errcode = -ENOBUFS; 5286 *errcode = -ENOBUFS;
5288 skb = alloc_skb(header_len, gfp_head); 5287 skb = alloc_skb(header_len, gfp_mask);
5289 if (!skb) 5288 if (!skb)
5290 return NULL; 5289 return NULL;
5291 5290
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index d6d5c20d7044..8c826603bf36 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
545 struct sk_psock *psock = container_of(gc, struct sk_psock, gc); 545 struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
546 546
547 /* No sk_callback_lock since already detached. */ 547 /* No sk_callback_lock since already detached. */
548 if (psock->parser.enabled) 548 strp_done(&psock->parser.strp);
549 strp_done(&psock->parser.strp);
550 549
551 cancel_work_sync(&psock->work); 550 cancel_work_sync(&psock->work);
552 551
diff --git a/net/core/sock.c b/net/core/sock.c
index 6aa2e7e0b4fb..bc3512f230a3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2380 } 2380 }
2381 2381
2382 if (sk_has_memory_pressure(sk)) { 2382 if (sk_has_memory_pressure(sk)) {
2383 int alloc; 2383 u64 alloc;
2384 2384
2385 if (!sk_under_memory_pressure(sk)) 2385 if (!sk_under_memory_pressure(sk))
2386 return 1; 2386 return 1;