 Documentation/sysctl/net.txt                |  8
 drivers/net/macsec.c                        | 20
 drivers/ptp/ptp_clock.c                     |  6
 include/linux/filter.h                      |  1
 kernel/bpf/btf.c                            | 58
 kernel/bpf/core.c                           | 49
 kernel/bpf/devmap.c                         |  3
 kernel/bpf/helpers.c                        |  2
 kernel/bpf/queue_stack_maps.c               |  2
 kernel/bpf/verifier.c                       | 13
 net/bridge/br_multicast.c                   |  3
 net/core/dev.c                              |  2
 net/core/filter.c                           | 21
 net/core/sysctl_net_core.c                  | 10
 net/ipv4/udp_diag.c                         |  1
 net/sched/sch_gred.c                        |  2
 tools/testing/selftests/bpf/config          |  2
 tools/testing/selftests/bpf/test_verifier.c | 15
 18 files changed, 155 insertions(+), 63 deletions(-)
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 9ecde517728c..2793d4eac55f 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -92,6 +92,14 @@ Values :
 	0 - disable JIT kallsyms export (default value)
 	1 - enable JIT kallsyms export for privileged users only
 
+bpf_jit_limit
+-------------
+
+This enforces a global limit for memory allocations to the BPF JIT
+compiler in order to reject unprivileged JIT requests once it has
+been surpassed. bpf_jit_limit contains the value of the global limit
+in bytes.
+
 dev_weight
 --------------
 
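The knob documented above surfaces as /proc/sys/net/core/bpf_jit_limit via the net/core/sysctl_net_core.c hunk further down; since that entry is registered with mode 0600, reading it requires root. A minimal userspace sketch of querying the limit — an illustration assuming that path, not code from this patch:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/core/bpf_jit_limit", "r");
		long limit;

		if (!f || fscanf(f, "%ld", &limit) != 1)
			return 1;	/* missing knob or no permission */
		printf("BPF JIT limit: %ld bytes\n", limit);
		fclose(f);
		return 0;
	}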
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 4bb90b6867a2..64a982563d59 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2812,9 +2812,6 @@ static int macsec_dev_open(struct net_device *dev)
 	struct net_device *real_dev = macsec->real_dev;
 	int err;
 
-	if (!(real_dev->flags & IFF_UP))
-		return -ENETDOWN;
-
 	err = dev_uc_add(real_dev, dev->dev_addr);
 	if (err < 0)
 		return err;
@@ -3306,6 +3303,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 	if (err < 0)
 		goto del_dev;
 
+	netif_stacked_transfer_operstate(real_dev, dev);
+	linkwatch_fire_event(dev);
+
 	macsec_generation++;
 
 	return 0;
@@ -3490,6 +3490,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
 		return NOTIFY_DONE;
 
 	switch (event) {
+	case NETDEV_DOWN:
+	case NETDEV_UP:
+	case NETDEV_CHANGE: {
+		struct macsec_dev *m, *n;
+		struct macsec_rxh_data *rxd;
+
+		rxd = macsec_data_rtnl(real_dev);
+		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
+			struct net_device *dev = m->secy.netdev;
+
+			netif_stacked_transfer_operstate(real_dev, dev);
+		}
+		break;
+	}
 	case NETDEV_UNREGISTER: {
 		struct macsec_dev *m, *n;
 		struct macsec_rxh_data *rxd;
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 7eacc1c4b3b1..5419a89d300e 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -232,12 +232,8 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	init_waitqueue_head(&ptp->tsev_wq);
 
 	if (ptp->info->do_aux_work) {
-		char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
-
 		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
-		ptp->kworker = kthread_create_worker(0, worker_name ?
-						     worker_name : info->name);
-		kfree(worker_name);
+		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
 		if (IS_ERR(ptp->kworker)) {
 			err = PTR_ERR(ptp->kworker);
 			pr_err("failed to create ptp aux_worker %d\n", err);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 91b4c934f02e..de629b706d1d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
+extern int bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 378cef70341c..ee4c82667d65 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -2067,56 +2067,47 @@ static int btf_check_sec_info(struct btf_verifier_env *env,
 	return 0;
 }
 
-static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
-			 u32 btf_data_size)
+static int btf_parse_hdr(struct btf_verifier_env *env)
 {
+	u32 hdr_len, hdr_copy, btf_data_size;
 	const struct btf_header *hdr;
-	u32 hdr_len, hdr_copy;
-	/*
-	 * Minimal part of the "struct btf_header" that
-	 * contains the hdr_len.
-	 */
-	struct btf_min_header {
-		u16	magic;
-		u8	version;
-		u8	flags;
-		u32	hdr_len;
-	} __user *min_hdr;
 	struct btf *btf;
 	int err;
 
 	btf = env->btf;
-	min_hdr = btf_data;
+	btf_data_size = btf->data_size;
 
-	if (btf_data_size < sizeof(*min_hdr)) {
+	if (btf_data_size <
+	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
 		btf_verifier_log(env, "hdr_len not found");
 		return -EINVAL;
 	}
 
-	if (get_user(hdr_len, &min_hdr->hdr_len))
-		return -EFAULT;
-
+	hdr = btf->data;
+	hdr_len = hdr->hdr_len;
 	if (btf_data_size < hdr_len) {
 		btf_verifier_log(env, "btf_header not found");
 		return -EINVAL;
 	}
 
-	err = bpf_check_uarg_tail_zero(btf_data, sizeof(btf->hdr), hdr_len);
-	if (err) {
-		if (err == -E2BIG)
-			btf_verifier_log(env, "Unsupported btf_header");
-		return err;
+	/* Ensure the unsupported header fields are zero */
+	if (hdr_len > sizeof(btf->hdr)) {
+		u8 *expected_zero = btf->data + sizeof(btf->hdr);
+		u8 *end = btf->data + hdr_len;
+
+		for (; expected_zero < end; expected_zero++) {
+			if (*expected_zero) {
+				btf_verifier_log(env, "Unsupported btf_header");
+				return -E2BIG;
+			}
+		}
 	}
 
 	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
-	if (copy_from_user(&btf->hdr, btf_data, hdr_copy))
-		return -EFAULT;
+	memcpy(&btf->hdr, btf->data, hdr_copy);
 
 	hdr = &btf->hdr;
 
-	if (hdr->hdr_len != hdr_len)
-		return -EINVAL;
-
 	btf_verifier_log_hdr(env, btf_data_size);
 
 	if (hdr->magic != BTF_MAGIC) {
@@ -2186,10 +2177,6 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
 	}
 	env->btf = btf;
 
-	err = btf_parse_hdr(env, btf_data, btf_data_size);
-	if (err)
-		goto errout;
-
 	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
 	if (!data) {
 		err = -ENOMEM;
@@ -2198,13 +2185,18 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
 
 	btf->data = data;
 	btf->data_size = btf_data_size;
-	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
 
 	if (copy_from_user(data, btf_data, btf_data_size)) {
 		err = -EFAULT;
 		goto errout;
 	}
 
+	err = btf_parse_hdr(env);
+	if (err)
+		goto errout;
+
+	btf->nohdr_data = btf->data + btf->hdr.hdr_len;
+
 	err = btf_parse_str_sec(env);
 	if (err)
 		goto errout;
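The rewritten btf_parse_hdr() above enforces a common forward-compatibility rule: a header may be longer than the struct this kernel knows, but every byte past the known fields must be zero, otherwise the blob is rejected with -E2BIG. A standalone sketch of that rule, using hypothetical names (known_hdr, hdr_tail_is_valid) rather than kernel API:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct known_hdr {
		uint16_t magic;
		uint8_t  version;
		uint8_t  flags;
		uint32_t hdr_len;	/* total header length in the blob */
	};

	static bool hdr_tail_is_valid(const uint8_t *data, size_t size)
	{
		const struct known_hdr *hdr = (const struct known_hdr *)data;
		size_t i;

		/* hdr_len itself must be readable */
		if (size < offsetof(struct known_hdr, hdr_len) + sizeof(hdr->hdr_len))
			return false;
		/* the claimed header must fit inside the blob */
		if (size < hdr->hdr_len)
			return false;
		/* unknown trailing fields must all be zero */
		for (i = sizeof(*hdr); i < hdr->hdr_len; i++)
			if (data[i])
				return false;
		return true;
	}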
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 7c7eeea8cffc..6377225b2082 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -365,10 +365,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
+# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
+
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
+int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -577,27 +580,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 	return ret;
 }
 
+static atomic_long_t bpf_jit_current;
+
+#if defined(MODULES_VADDR)
+static int __init bpf_jit_charge_init(void)
+{
+	/* Only used as heuristic here to derive limit. */
+	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
+					    PAGE_SIZE), INT_MAX);
+	return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+#endif
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+	if (atomic_long_add_return(pages, &bpf_jit_current) >
+	    (bpf_jit_limit >> PAGE_SHIFT)) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			atomic_long_sub(pages, &bpf_jit_current);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+	atomic_long_sub(pages, &bpf_jit_current);
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	struct bpf_binary_header *hdr;
-	unsigned int size, hole, start;
+	u32 size, hole, start, pages;
 
 	/* Most of BPF filters are really small, but if some of them
 	 * fill a page, allow at least 128 extra bytes to insert a
 	 * random section of illegal instructions.
 	 */
 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+	pages = size / PAGE_SIZE;
+
+	if (bpf_jit_charge_modmem(pages))
+		return NULL;
 	hdr = module_alloc(size);
-	if (hdr == NULL)
+	if (!hdr) {
+		bpf_jit_uncharge_modmem(pages);
 		return NULL;
+	}
 
 	/* Fill space with illegal/arch-dep instructions. */
 	bpf_fill_ill_insns(hdr, size);
 
-	hdr->pages = size / PAGE_SIZE;
+	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -610,7 +650,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
+	u32 pages = hdr->pages;
+
 	module_memfree(hdr);
+	bpf_jit_uncharge_modmem(pages);
 }
 
 /* This symbol is only overridden by archs that have different
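The charge/uncharge pair above is a plain atomic budget: optimistically add, then roll back and fail if the new total exceeds the limit and the caller is unprivileged. A userspace model of the same pattern using C11 atomics — the names and the privileged flag are illustrative; the kernel uses atomic_long_t and capable(CAP_SYS_ADMIN):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_long jit_current;		/* pages currently charged */
	static long jit_limit_pages = 40000;	/* bpf_jit_limit >> PAGE_SHIFT analogue */

	static int charge(long pages, bool privileged)
	{
		/* atomic_fetch_add() returns the old value, so old + pages is
		 * the new total, matching atomic_long_add_return() above.
		 */
		if (atomic_fetch_add(&jit_current, pages) + pages > jit_limit_pages &&
		    !privileged) {
			atomic_fetch_sub(&jit_current, pages);
			return -1;	/* -EPERM in the kernel */
		}
		return 0;
	}

	static void uncharge(long pages)
	{
		atomic_fetch_sub(&jit_current, pages);
	}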
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 141710b82a6c..191b79948424 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -512,8 +512,7 @@ static int dev_map_notification(struct notifier_block *notifier,
 			struct bpf_dtab_netdev *dev, *odev;
 
 			dev = READ_ONCE(dtab->netdev_map[i]);
-			if (!dev ||
-			    dev->dev->ifindex != netdev->ifindex)
+			if (!dev || netdev != dev->dev)
 				continue;
 			odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
 			if (dev == odev)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index ab0d5e3f9892..a74972b07e74 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -99,7 +99,6 @@ BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
 const struct bpf_func_proto bpf_map_pop_elem_proto = {
 	.func		= bpf_map_pop_elem,
 	.gpl_only	= false,
-	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
@@ -113,7 +112,6 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 const struct bpf_func_proto bpf_map_peek_elem_proto = {
 	.func		= bpf_map_pop_elem,
 	.gpl_only	= false,
-	.pkt_access	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 12a93fb37449..8bbd72d3a121 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -122,6 +122,7 @@ static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	raw_spin_lock_irqsave(&qs->lock, flags);
 
 	if (queue_stack_map_is_empty(qs)) {
+		memset(value, 0, qs->map.value_size);
 		err = -ENOENT;
 		goto out;
 	}
@@ -151,6 +152,7 @@ static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	raw_spin_lock_irqsave(&qs->lock, flags);
 
 	if (queue_stack_map_is_empty(qs)) {
+		memset(value, 0, qs->map.value_size);
 		err = -ENOENT;
 		goto out;
 	}
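The two memset() additions above close an information leak: the helpers take an ARG_PTR_TO_UNINIT_MAP_VALUE destination, so on the -ENOENT path the buffer would otherwise flow back to the BPF program with stale stack contents. A sketch of the general rule — leave the output buffer fully defined even on failure (function name is illustrative):

	#include <errno.h>
	#include <string.h>

	/* Pop up to out_size bytes; never return with 'out' uninitialized. */
	static int pop_value(void *out, size_t out_size,
			     const void *src, size_t src_len)
	{
		if (src_len == 0) {
			memset(out, 0, out_size);	/* defined contents on failure */
			return -ENOENT;
		}
		memcpy(out, src, src_len < out_size ? src_len : out_size);
		return 0;
	}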
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 98fa0be35370..171a2c88e77d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1387,21 +1387,24 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
 				       enum bpf_access_type t)
 {
 	switch (env->prog->type) {
+	/* Program types only with direct read access go here! */
 	case BPF_PROG_TYPE_LWT_IN:
 	case BPF_PROG_TYPE_LWT_OUT:
 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
 	case BPF_PROG_TYPE_SK_REUSEPORT:
-		/* dst_input() and dst_output() can't write for now */
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+	case BPF_PROG_TYPE_CGROUP_SKB:
 		if (t == BPF_WRITE)
 			return false;
 		/* fallthrough */
+
+	/* Program types with direct read + write access go here! */
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
 	case BPF_PROG_TYPE_XDP:
 	case BPF_PROG_TYPE_LWT_XMIT:
 	case BPF_PROG_TYPE_SK_SKB:
 	case BPF_PROG_TYPE_SK_MSG:
-	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 		if (meta)
 			return meta->pkt_access;
 
@@ -5706,7 +5709,11 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	bool is_narrower_load;
 	u32 target_size;
 
-	if (ops->gen_prologue) {
+	if (ops->gen_prologue || env->seen_direct_write) {
+		if (!ops->gen_prologue) {
+			verbose(env, "bpf verifier is misconfigured\n");
+			return -EINVAL;
+		}
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
 					env->prog);
 		if (cnt >= ARRAY_SIZE(insn_buf)) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 41cdafbf2ebe..6bac0d6b7b94 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1428,8 +1428,7 @@ static void br_multicast_query_received(struct net_bridge *br,
 	 * is 0.0.0.0 should not be added to router port list.
 	 */
 	if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
-	    (saddr->proto == htons(ETH_P_IPV6) &&
-	     !ipv6_addr_any(&saddr->u.ip6)))
+	    saddr->proto == htons(ETH_P_IPV6))
 		br_multicast_mark_router(br, port);
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 022ad73d6253..77d43ae2a7bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5457,7 +5457,7 @@ static void gro_flush_oldest(struct list_head *head)
 	/* Do not adjust napi->gro_hash[].count, caller is adding a new
 	 * SKB to the chain.
 	 */
-	list_del(&oldest->list);
+	skb_list_del_init(oldest);
 	napi_gro_complete(oldest);
 }
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 35c6933c2622..e521c5ebc7d1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5264,8 +5264,6 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_msg_pull_data_proto;
 	case BPF_FUNC_msg_push_data:
 		return &bpf_msg_push_data_proto;
-	case BPF_FUNC_get_local_storage:
-		return &bpf_get_local_storage_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -5296,8 +5294,6 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_redirect_map_proto;
 	case BPF_FUNC_sk_redirect_hash:
 		return &bpf_sk_redirect_hash_proto;
-	case BPF_FUNC_get_local_storage:
-		return &bpf_get_local_storage_proto;
 #ifdef CONFIG_INET
 	case BPF_FUNC_sk_lookup_tcp:
 		return &bpf_sk_lookup_tcp_proto;
@@ -5496,7 +5492,13 @@ static bool cg_skb_is_valid_access(int off, int size,
 	case bpf_ctx_range(struct __sk_buff, data_meta):
 	case bpf_ctx_range(struct __sk_buff, flow_keys):
 		return false;
+	case bpf_ctx_range(struct __sk_buff, data):
+	case bpf_ctx_range(struct __sk_buff, data_end):
+		if (!capable(CAP_SYS_ADMIN))
+			return false;
+		break;
 	}
+
 	if (type == BPF_WRITE) {
 		switch (off) {
 		case bpf_ctx_range(struct __sk_buff, mark):
@@ -5638,6 +5640,15 @@ static bool sock_filter_is_valid_access(int off, int size,
 					  prog->expected_attach_type);
 }
 
+static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
+			     const struct bpf_prog *prog)
+{
+	/* Neither direct read nor direct write requires any preliminary
+	 * action.
+	 */
+	return 0;
+}
+
 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
 				const struct bpf_prog *prog, int drop_verdict)
 {
@@ -7204,6 +7215,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
 	.get_func_proto		= xdp_func_proto,
 	.is_valid_access	= xdp_is_valid_access,
 	.convert_ctx_access	= xdp_convert_ctx_access,
+	.gen_prologue		= bpf_noop_prologue,
 };
 
 const struct bpf_prog_ops xdp_prog_ops = {
@@ -7302,6 +7314,7 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = {
 	.get_func_proto		= sk_msg_func_proto,
 	.is_valid_access	= sk_msg_is_valid_access,
 	.convert_ctx_access	= sk_msg_convert_ctx_access,
+	.gen_prologue		= bpf_noop_prologue,
 };
 
 const struct bpf_prog_ops sk_msg_prog_ops = {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b1a2c5e38530..37b4667128a3 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 	return ret;
 }
 
-# ifdef CONFIG_HAVE_EBPF_JIT
 static int
 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 				    void __user *buffer, size_t *lenp,
@@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
 	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
-# endif
 #endif
 
 static struct ctl_table net_core_table[] = {
@@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = {
 		.extra2		= &one,
 	},
 # endif
+	{
+		.procname	= "bpf_jit_limit",
+		.data		= &bpf_jit_limit,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= proc_dointvec_minmax_bpf_restricted,
+		.extra1		= &one,
+	},
 #endif
 	{
 		.procname	= "netdev_tstamp_prequeue",
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index d9ad986c7b2c..5cbb9be05295 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -42,6 +42,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
 
 	rcu_read_lock();
 	if (req->sdiag_family == AF_INET)
+		/* src and dst are swapped for historical reasons */
 		sk = __udp4_lib_lookup(net,
 				req->id.idiag_src[0], req->id.idiag_sport,
 				req->id.idiag_dst[0], req->id.idiag_dport,
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index cbe4831f46f4..4a042abf844c 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -413,7 +413,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
 		if (tb[TCA_GRED_LIMIT] != NULL)
 			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-		return gred_change_table_def(sch, opt);
+		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 	}
 
 	if (tb[TCA_GRED_PARMS] == NULL ||
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index dd49df5e2df4..7f90d3645af8 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -20,3 +20,5 @@ CONFIG_VXLAN=y
 CONFIG_GENEVE=y
 CONFIG_NET_CLS_FLOWER=m
 CONFIG_LWTUNNEL=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_XDP_SOCKETS=y
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 769d68a48f30..36f3d3009d1a 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4891,6 +4891,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -5146,6 +5148,7 @@ static struct bpf_test tests[] = {
 		.fixup_cgroup_storage = { 1 },
 		.result = REJECT,
 		.errstr = "get_local_storage() doesn't support non-zero flags",
+		.errstr_unpriv = "R2 leaks addr into helper function",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -5261,6 +5264,7 @@ static struct bpf_test tests[] = {
 		.fixup_percpu_cgroup_storage = { 1 },
 		.result = REJECT,
 		.errstr = "get_local_storage() doesn't support non-zero flags",
+		.errstr_unpriv = "R2 leaks addr into helper function",
 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
 	},
 	{
@@ -14050,6 +14054,13 @@ static void get_unpriv_disabled()
 	fclose(fd);
 }
 
+static bool test_as_unpriv(struct bpf_test *test)
+{
+	return !test->prog_type ||
+	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
+	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
+}
+
 static int do_test(bool unpriv, unsigned int from, unsigned int to)
 {
 	int i, passes = 0, errors = 0, skips = 0;
@@ -14060,10 +14071,10 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
 		/* Program types that are not supported by non-root we
 		 * skip right away.
 		 */
-		if (!test->prog_type && unpriv_disabled) {
+		if (test_as_unpriv(test) && unpriv_disabled) {
 			printf("#%d/u %s SKIP\n", i, test->descr);
 			skips++;
-		} else if (!test->prog_type) {
+		} else if (test_as_unpriv(test)) {
 			if (!unpriv)
 				set_admin(false);
 			printf("#%d/u %s ", i, test->descr);