author     Radim Krčmář <rkrcmar@redhat.com>   2018-09-07 12:30:47 -0400
committer  Radim Krčmář <rkrcmar@redhat.com>   2018-09-07 12:30:47 -0400
commit     ed2ef29100644eabc6099cbf1a3aa9d938555ab8 (patch)
tree       dadd3eb5d77ae2c44adc92869c662292bd652f46 /net
parent     732b53146ac8f604e45c593efe0579f78205fdcc (diff)
parent     df88f3181f10565c6e3a89eb6f0f9e6afaaf15f1 (diff)
Merge tag 'kvm-s390-master-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux
KVM: s390: Fixes for 4.19
- Fallout from the hugetlbfs support: pfmf interpretation and locking
- VSIE: fix keywrapping for nested guests
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c              |   1
-rw-r--r--  net/dsa/slave.c             |   4
-rw-r--r--  net/ipv4/tcp_bbr.c          |  42
-rw-r--r--  net/ipv4/tcp_ipv4.c         |   6
-rw-r--r--  net/ipv6/addrconf.c         |   6
-rw-r--r--  net/ipv6/ip6_fib.c          |   2
-rw-r--r--  net/ipv6/ip6_vti.c          |   3
-rw-r--r--  net/ipv6/route.c            |   2
-rw-r--r--  net/ncsi/ncsi-netlink.c     |   4
-rw-r--r--  net/rds/tcp.c               |   1
-rw-r--r--  net/sched/act_api.c         |  70
-rw-r--r--  net/sched/act_bpf.c         |   8
-rw-r--r--  net/sched/act_connmark.c    |   8
-rw-r--r--  net/sched/act_csum.c        |   8
-rw-r--r--  net/sched/act_gact.c        |   8
-rw-r--r--  net/sched/act_ife.c         |  92
-rw-r--r--  net/sched/act_ipt.c         |  16
-rw-r--r--  net/sched/act_mirred.c      |   8
-rw-r--r--  net/sched/act_nat.c         |   8
-rw-r--r--  net/sched/act_pedit.c       |   8
-rw-r--r--  net/sched/act_police.c      |   8
-rw-r--r--  net/sched/act_sample.c      |   8
-rw-r--r--  net/sched/act_simple.c      |   8
-rw-r--r--  net/sched/act_skbedit.c     |   8
-rw-r--r--  net/sched/act_skbmod.c      |   8
-rw-r--r--  net/sched/act_tunnel_key.c  |   8
-rw-r--r--  net/sched/act_vlan.c        |   8
-rw-r--r--  net/sched/cls_u32.c         |  10
-rw-r--r--  net/sched/sch_cake.c        |  24
-rw-r--r--  net/tls/tls_main.c          |   9
-rw-r--r--  net/xdp/xdp_umem.c          |   4
31 files changed, 142 insertions, 266 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 325fc5088370..82114e1111e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -93,7 +93,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/notifier.h>
 #include <linux/skbuff.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 962c4fd338ba..1c45c1d6d241 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
         const struct tc_action *a;
         struct dsa_port *to_dp;
         int err = -EOPNOTSUPP;
-        LIST_HEAD(actions);

         if (!ds->ops->port_mirror_add)
                 return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
         if (!tcf_exts_has_one_action(cls->exts))
                 return err;

-        tcf_exts_to_list(cls->exts, &actions);
-        a = list_first_entry(&actions, struct tc_action, list);
+        a = tcf_exts_first_action(cls->exts);

         if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                 struct dsa_mall_mirror_tc_entry *mirror;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 13d34427ca3d..02ff2dde9609 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -95,11 +95,10 @@ struct bbr {
         u32     mode:3,                  /* current bbr_mode in state machine */
                 prev_ca_state:3,         /* CA state on previous ACK */
                 packet_conservation:1,   /* use packet conservation? */
-                restore_cwnd:1,          /* decided to revert cwnd to old value */
                 round_start:1,           /* start of packet-timed tx->ack round? */
                 idle_restart:1,          /* restarting after idle? */
                 probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
-                unused:12,
+                unused:13,
                 lt_is_sampling:1,        /* taking long-term ("LT") samples now? */
                 lt_rtt_cnt:7,            /* round trips in long-term interval */
                 lt_use_bw:1;             /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
 /* If we estimate we're policed, use lt_bw for this many round trips: */
 static const u32 bbr_lt_bw_max_rtts = 48;

+static void bbr_check_probe_rtt_done(struct sock *sk);
+
 /* Do we estimate that STARTUP filled the pipe? */
 static bool bbr_full_bw_reached(const struct sock *sk)
 {
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
                  */
                 if (bbr->mode == BBR_PROBE_BW)
                         bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+                else if (bbr->mode == BBR_PROBE_RTT)
+                        bbr_check_probe_rtt_done(sk);
         }
 }

@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
                 cwnd = tcp_packets_in_flight(tp) + acked;
         } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
                 /* Exiting loss recovery; restore cwnd saved before recovery. */
-                bbr->restore_cwnd = 1;
+                cwnd = max(cwnd, bbr->prior_cwnd);
                 bbr->packet_conservation = 0;
         }
         bbr->prev_ca_state = state;

-        if (bbr->restore_cwnd) {
-                /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
-                cwnd = max(cwnd, bbr->prior_cwnd);
-                bbr->restore_cwnd = 0;
-        }
-
         if (bbr->packet_conservation) {
                 *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
                 return true;  /* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct bbr *bbr = inet_csk_ca(sk);
-        u32 cwnd = 0, target_cwnd = 0;
+        u32 cwnd = tp->snd_cwnd, target_cwnd = 0;

         if (!acked)
-                return;
+                goto done;  /* no packet fully ACKed; just apply caps */

         if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
                 goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
                 bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
 }

+static void bbr_check_probe_rtt_done(struct sock *sk)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        struct bbr *bbr = inet_csk_ca(sk);
+
+        if (!(bbr->probe_rtt_done_stamp &&
+              after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
+                return;
+
+        bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
+        tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+        bbr_reset_mode(sk);
+}
+
 /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
  * periodically drain the bottleneck queue, to converge to measure the true
  * min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
                 } else if (bbr->probe_rtt_done_stamp) {
                         if (bbr->round_start)
                                 bbr->probe_rtt_round_done = 1;
-                        if (bbr->probe_rtt_round_done &&
-                            after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
-                                bbr->min_rtt_stamp = tcp_jiffies32;
-                                bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
-                                bbr_reset_mode(sk);
-                        }
+                        if (bbr->probe_rtt_round_done)
+                                bbr_check_probe_rtt_done(sk);
                 }
         }
         /* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
         bbr->has_seen_rtt = 0;
         bbr_init_pacing_rate_from_rtt(sk);

-        bbr->restore_cwnd = 0;
         bbr->round_start = 0;
         bbr->idle_restart = 0;
         bbr->full_bw_reached = 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9e041fa5c545..44c09eddbb78 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net)
                 if (res)
                         goto fail;
                 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+                /* Please enforce IP_DF and IPID==0 for RST and
+                 * ACK sent in SYN-RECV and TIME-WAIT state.
+                 */
+                inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
         }

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2fac4ad74867..d51a8c0b3372 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)

         ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);

-        ip6_route_add(&cfg, GFP_ATOMIC, NULL);
+        ip6_route_add(&cfg, GFP_KERNEL, NULL);
 }

 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
         if (addr.s6_addr32[3]) {
                 add_addr(idev, &addr, plen, scope);
                 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
-                                      GFP_ATOMIC);
+                                      GFP_KERNEL);
                 return;
         }

@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)

                         add_addr(idev, &addr, plen, flag);
                         addrconf_prefix_route(&addr, plen, 0, idev->dev,
-                                              0, pflags, GFP_ATOMIC);
+                                              0, pflags, GFP_KERNEL);
                 }
         }
 }
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d212738e9d10..c861a6d4671d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
                 }
         }

+        lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+
         if (f6i->fib6_nh.nh_dev)
                 dev_put(f6i->fib6_nh.nh_dev);

diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 38dec9da90d3..5095367c7204 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
         }

         t = rtnl_dereference(ip6n->tnls_wc[0]);
-        unregister_netdevice_queue(t->dev, list);
+        if (t)
+                unregister_netdevice_queue(t->dev, list);
 }

 static int __net_init vti6_init_net(struct net *net)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7208c16302f6..c4ea13e8360b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
         rt->dst.error = 0;
         rt->dst.output = ip6_output;

-        if (ort->fib6_type == RTN_LOCAL) {
+        if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
                 rt->dst.input = ip6_input;
         } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
                 rt->dst.input = ip6_mc_input;
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 82e6edf9c5d9..45f33d6dedf7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
         bool found;
         int rc;

-        if (id > ndp->package_num) {
+        if (id > ndp->package_num - 1) {
                 netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
                 return -ENODEV;
         }
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
                 return 0; /* done */

         hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
-                          &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO);
+                          &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO);
         if (!hdr) {
                 rc = -EMSGSIZE;
                 goto err;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2c7b7c352d3e..b9bbcf3d6c63 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -37,7 +37,6 @@
 #include <net/tcp.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
-#include <net/tcp.h>
 #include <net/addrconf.h>

 #include "rds.h"
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 229d63c99be2..db83dac1e7f4 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(tcf_generic_walker);

-static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
-                            struct tc_action **a, int bind)
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 {
         struct tcf_idrinfo *idrinfo = tn->idrinfo;
         struct tc_action *p;

         spin_lock(&idrinfo->lock);
         p = idr_find(&idrinfo->action_idr, index);
-        if (IS_ERR(p)) {
+        if (IS_ERR(p))
                 p = NULL;
-        } else if (p) {
+        else if (p)
                 refcount_inc(&p->tcfa_refcnt);
-                if (bind)
-                        atomic_inc(&p->tcfa_bindcnt);
-        }
         spin_unlock(&idrinfo->lock);

         if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
         }
         return false;
 }
-
-int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
-{
-        return __tcf_idr_check(tn, index, a, 0);
-}
 EXPORT_SYMBOL(tcf_idr_search);

-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-                   int bind)
+static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 {
-        return __tcf_idr_check(tn, index, a, bind);
-}
-EXPORT_SYMBOL(tcf_idr_check);
-
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
-{
-        struct tcf_idrinfo *idrinfo = tn->idrinfo;
         struct tc_action *p;
         int ret = 0;

@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
         spin_unlock(&idrinfo->lock);
         return ret;
 }
-EXPORT_SYMBOL(tcf_idr_delete_index);

 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                    struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,

         p->idrinfo = idrinfo;
         p->ops = ops;
-        INIT_LIST_HEAD(&p->list);
         *a = p;
         return 0;
 err3:
@@ -686,14 +667,18 @@ static int tcf_action_put(struct tc_action *p)
         return __tcf_action_put(p, false);
 }

+/* Put all actions in this array, skip those NULL's. */
 static void tcf_action_put_many(struct tc_action *actions[])
 {
         int i;

-        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+        for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                 struct tc_action *a = actions[i];
-                const struct tc_action_ops *ops = a->ops;
+                const struct tc_action_ops *ops;

+                if (!a)
+                        continue;
+                ops = a->ops;
                 if (tcf_action_put(a))
                         module_put(ops->owner);
         }
@@ -1175,41 +1160,38 @@ err_out:
         return err;
 }

-static int tcf_action_delete(struct net *net, struct tc_action *actions[],
-                             int *acts_deleted, struct netlink_ext_ack *extack)
+static int tcf_action_delete(struct net *net, struct tc_action *actions[])
 {
-        u32 act_index;
-        int ret, i;
+        int i;

         for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                 struct tc_action *a = actions[i];
                 const struct tc_action_ops *ops = a->ops;
-
                 /* Actions can be deleted concurrently so we must save their
                  * type and id to search again after reference is released.
                  */
-                act_index = a->tcfa_index;
+                struct tcf_idrinfo *idrinfo = a->idrinfo;
+                u32 act_index = a->tcfa_index;

                 if (tcf_action_put(a)) {
                         /* last reference, action was deleted concurrently */
                         module_put(ops->owner);
                 } else {
+                        int ret;
+
                         /* now do the delete */
-                        ret = ops->delete(net, act_index);
-                        if (ret < 0) {
-                                *acts_deleted = i + 1;
+                        ret = tcf_idr_delete_index(idrinfo, act_index);
+                        if (ret < 0)
                                 return ret;
-                        }
                 }
+                actions[i] = NULL;
         }
-        *acts_deleted = i;
         return 0;
 }

 static int
 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
-               int *acts_deleted, u32 portid, size_t attr_size,
-               struct netlink_ext_ack *extack)
+               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
 {
         int ret;
         struct sk_buff *skb;
@@ -1227,7 +1209,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
         }

         /* now do the delete */
-        ret = tcf_action_delete(net, actions, acts_deleted, extack);
+        ret = tcf_action_delete(net, actions);
         if (ret < 0) {
                 NL_SET_ERR_MSG(extack, "Failed to delete TC action");
                 kfree_skb(skb);
@@ -1249,8 +1231,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
         struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
         struct tc_action *act;
         size_t attr_size = 0;
-        struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
-        int acts_deleted = 0;
+        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

         ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
         if (ret < 0)
@@ -1280,14 +1261,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
         if (event == RTM_GETACTION)
                 ret = tcf_get_notify(net, portid, n, actions, event, extack);
         else { /* delete */
-                ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
-                                     attr_size, extack);
+                ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
                 if (ret)
                         goto err;
-                return ret;
+                return 0;
         }
 err:
-        tcf_action_put_many(&actions[acts_deleted]);
+        tcf_action_put_many(actions);
         return ret;
 }

diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d30b23e42436..0c68bc9cf0b4 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_bpf_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_bpf_ops __read_mostly = {
         .kind = "bpf",
         .type = TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
         .init = tcf_bpf_init,
         .walk = tcf_bpf_walker,
         .lookup = tcf_bpf_search,
-        .delete = tcf_bpf_delete,
         .size = sizeof(struct tcf_bpf),
 };

diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 54c0bf54f2ac..6f0f273f1139 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_connmark_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_connmark_ops = {
         .kind = "connmark",
         .type = TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
         .init = tcf_connmark_init,
         .walk = tcf_connmark_walker,
         .lookup = tcf_connmark_search,
-        .delete = tcf_connmark_delete,
         .size = sizeof(struct tcf_connmark_info),
 };

diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e698d3fe2080..b8a67ae3105a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
         return nla_total_size(sizeof(struct tc_csum));
 }

-static int tcf_csum_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, csum_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_csum_ops = {
         .kind = "csum",
         .type = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
         .walk = tcf_csum_walker,
         .lookup = tcf_csum_search,
         .get_fill_size = tcf_csum_get_fill_size,
-        .delete = tcf_csum_delete,
         .size = sizeof(struct tcf_csum),
 };

diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 6a3f25a8ffb3..cd1d9bd32ef9 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
         return sz;
 }

-static int tcf_gact_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, gact_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_gact_ops = {
         .kind = "gact",
         .type = TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
         .walk = tcf_gact_walker,
         .lookup = tcf_gact_search,
         .get_fill_size = tcf_gact_get_fill_size,
-        .delete = tcf_gact_delete,
         .size = sizeof(struct tcf_gact),
 };

diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index d1081bdf1bdb..196430aefe87 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
 {
         struct tcf_meta_ops *o;

-        read_lock_bh(&ife_mod_lock);
+        read_lock(&ife_mod_lock);
         list_for_each_entry(o, &ifeoplist, list) {
                 if (o->metaid == metaid) {
                         if (!try_module_get(o->owner))
                                 o = NULL;
-                        read_unlock_bh(&ife_mod_lock);
+                        read_unlock(&ife_mod_lock);
                         return o;
                 }
         }
-        read_unlock_bh(&ife_mod_lock);
+        read_unlock(&ife_mod_lock);

         return NULL;
 }
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
             !mops->get || !mops->alloc)
                 return -EINVAL;

-        write_lock_bh(&ife_mod_lock);
+        write_lock(&ife_mod_lock);

         list_for_each_entry(m, &ifeoplist, list) {
                 if (m->metaid == mops->metaid ||
                     (strcmp(mops->name, m->name) == 0)) {
-                        write_unlock_bh(&ife_mod_lock);
+                        write_unlock(&ife_mod_lock);
                         return -EEXIST;
                 }
         }
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
                 mops->release = ife_release_meta_gen;

         list_add_tail(&mops->list, &ifeoplist);
-        write_unlock_bh(&ife_mod_lock);
+        write_unlock(&ife_mod_lock);
         return 0;
 }
 EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
         struct tcf_meta_ops *m;
         int err = -ENOENT;

-        write_lock_bh(&ife_mod_lock);
+        write_lock(&ife_mod_lock);
         list_for_each_entry(m, &ifeoplist, list) {
                 if (m->metaid == mops->metaid) {
                         list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
                         break;
                 }
         }
-        write_unlock_bh(&ife_mod_lock);
+        write_unlock(&ife_mod_lock);

         return err;
 }
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
 #endif

 /* called when adding new meta information
- * under ife->tcf_lock for existing action
  */
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-                                void *val, int len, bool exists,
-                                bool rtnl_held)
+static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
 {
         struct tcf_meta_ops *ops = find_ife_oplist(metaid);
         int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
         if (!ops) {
                 ret = -ENOENT;
 #ifdef CONFIG_MODULES
-                if (exists)
-                        spin_unlock_bh(&ife->tcf_lock);
                 if (rtnl_held)
                         rtnl_unlock();
                 request_module("ife-meta-%s", ife_meta_id2name(metaid));
                 if (rtnl_held)
                         rtnl_lock();
-                if (exists)
-                        spin_lock_bh(&ife->tcf_lock);
                 ops = find_ife_oplist(metaid);
 #endif
         }
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }

 /* called when adding new meta information
- * under ife->tcf_lock for existing action
  */
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-                        int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+                          struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                          int len, bool atomic, bool exists)
 {
         struct tcf_meta_info *mi = NULL;
-        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
         int ret = 0;

-        if (!ops)
-                return -ENOENT;
-
         mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
-        if (!mi) {
-                /*put back what find_ife_oplist took */
-                module_put(ops->owner);
+        if (!mi)
                 return -ENOMEM;
-        }

         mi->metaid = metaid;
         mi->ops = ops;
@@ -327,29 +313,47 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                 ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                 if (ret != 0) {
                         kfree(mi);
-                        module_put(ops->owner);
                         return ret;
                 }
         }

+        if (exists)
+                spin_lock_bh(&ife->tcf_lock);
         list_add_tail(&mi->metalist, &ife->metalist);
+        if (exists)
+                spin_unlock_bh(&ife->tcf_lock);
+
+        return ret;
+}
+
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                        int len, bool exists)
+{
+        const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+        int ret;

+        if (!ops)
+                return -ENOENT;
+        ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+        if (ret)
+                /*put back what find_ife_oplist took */
+                module_put(ops->owner);
         return ret;
 }

-static int use_all_metadata(struct tcf_ife_info *ife)
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 {
         struct tcf_meta_ops *o;
         int rc = 0;
         int installed = 0;

-        read_lock_bh(&ife_mod_lock);
+        read_lock(&ife_mod_lock);
         list_for_each_entry(o, &ifeoplist, list) {
-                rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+                rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists);
                 if (rc == 0)
                         installed += 1;
         }
-        read_unlock_bh(&ife_mod_lock);
+        read_unlock(&ife_mod_lock);

         if (installed)
                 return 0;
@@ -422,7 +426,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
                 kfree_rcu(p, rcu);
 }

-/* under ife->tcf_lock for existing action */
 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                              bool exists, bool rtnl_held)
 {
@@ -436,8 +439,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                         val = nla_data(tb[i]);
                         len = nla_len(tb[i]);

-                        rc = load_metaops_and_vet(ife, i, val, len, exists,
-                                                  rtnl_held);
+                        rc = load_metaops_and_vet(i, val, len, rtnl_held);
                         if (rc != 0)
                                 return rc;

@@ -540,8 +542,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                 p->eth_type = ife_type;
         }

-        if (exists)
-                spin_lock_bh(&ife->tcf_lock);

         if (ret == ACT_P_CREATED)
                 INIT_LIST_HEAD(&ife->metalist);
@@ -551,10 +551,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                        NULL, NULL);
                 if (err) {
 metadata_parse_err:
-                        if (exists)
-                                spin_unlock_bh(&ife->tcf_lock);
                         tcf_idr_release(*a, bind);
-
                         kfree(p);
                         return err;
                 }
@@ -569,17 +566,16 @@ metadata_parse_err:
                  * as we can. You better have at least one else we are
                  * going to bail out
                  */
-                err = use_all_metadata(ife);
+                err = use_all_metadata(ife, exists);
                 if (err) {
-                        if (exists)
-                                spin_unlock_bh(&ife->tcf_lock);
                         tcf_idr_release(*a, bind);
-
                         kfree(p);
                         return err;
                 }
         }

+        if (exists)
+                spin_lock_bh(&ife->tcf_lock);
         ife->tcf_action = parm->action;
         /* protected by tcf_lock when modifying existing action */
         rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +849,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_ife_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, ife_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ife_ops = {
         .kind = "ife",
         .type = TCA_ACT_IFE,
@@ -870,7 +859,6 @@ static struct tc_action_ops act_ife_ops = {
         .init = tcf_ife_init,
         .walk = tcf_ife_walker,
         .lookup = tcf_ife_search,
-        .delete = tcf_ife_delete,
         .size = sizeof(struct tcf_ife_info),
 };

diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 51f235bbeb5b..23273b5303fd 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_ipt_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ipt_ops = {
         .kind = "ipt",
         .type = TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
         .init = tcf_ipt_init,
         .walk = tcf_ipt_walker,
         .lookup = tcf_ipt_search,
-        .delete = tcf_ipt_delete,
         .size = sizeof(struct tcf_ipt),
 };

@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_xt_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_xt_ops = {
         .kind = "xt",
         .type = TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
         .init = tcf_xt_init,
         .walk = tcf_xt_walker,
         .lookup = tcf_xt_search,
-        .delete = tcf_xt_delete,
         .size = sizeof(struct tcf_ipt),
 };

diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 38fd20f10f67..8bf66d0a6800 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
         dev_put(dev);
 }

-static int tcf_mirred_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_mirred_ops = {
         .kind = "mirred",
         .type = TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
         .size = sizeof(struct tcf_mirred),
         .get_dev = tcf_mirred_get_dev,
         .put_dev = tcf_mirred_put_dev,
-        .delete = tcf_mirred_delete,
 };

 static __net_init int mirred_init_net(struct net *net)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 822e903bfc25..4313aa102440 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_nat_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, nat_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_nat_ops = {
         .kind = "nat",
         .type = TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
         .init = tcf_nat_init,
         .walk = tcf_nat_walker,
         .lookup = tcf_nat_search,
-        .delete = tcf_nat_delete,
         .size = sizeof(struct tcf_nat),
 };

diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 8a7a7cb94e83..107034070019 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -460,13 +460,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_pedit_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_pedit_ops = {
         .kind = "pedit",
         .type = TCA_ACT_PEDIT,
@@ -477,7 +470,6 @@ static struct tc_action_ops act_pedit_ops = {
         .init = tcf_pedit_init,
         .walk = tcf_pedit_walker,
         .lookup = tcf_pedit_search,
-        .delete = tcf_pedit_delete,
         .size = sizeof(struct tcf_pedit),
 };

diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 06f0742db593..5d8bfa878477 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_police_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, police_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 MODULE_AUTHOR("Alexey Kuznetsov");
 MODULE_DESCRIPTION("Policing actions");
 MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
         .init = tcf_police_init,
         .walk = tcf_police_walker,
         .lookup = tcf_police_search,
-        .delete = tcf_police_delete,
         .size = sizeof(struct tcf_police),
 };

diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 207b4132d1b0..44e9c00657bc 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_sample_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_sample_ops = {
         .kind = "sample",
         .type = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
         .cleanup = tcf_sample_cleanup,
         .walk = tcf_sample_walker,
         .lookup = tcf_sample_search,
-        .delete = tcf_sample_delete,
         .size = sizeof(struct tcf_sample),
 };

diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e616523ba3c1..52400d49f81f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_simp_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_simp_ops = {
         .kind = "simple",
         .type = TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
         .init = tcf_simp_init,
         .walk = tcf_simp_walker,
         .lookup = tcf_simp_search,
-        .delete = tcf_simp_delete,
         .size = sizeof(struct tcf_defact),
 };

diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 926d7bc4a89d..73e44ce2a883 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbedit_ops = {
         .kind = "skbedit",
         .type = TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
         .cleanup = tcf_skbedit_cleanup,
         .walk = tcf_skbedit_walker,
         .lookup = tcf_skbedit_search,
-        .delete = tcf_skbedit_delete,
         .size = sizeof(struct tcf_skbedit),
 };

diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index d6a1af0c4171..588077fafd6c 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbmod_ops = {
         .kind = "skbmod",
         .type = TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
         .cleanup = tcf_skbmod_cleanup,
         .walk = tcf_skbmod_walker,
         .lookup = tcf_skbmod_search,
-        .delete = tcf_skbmod_delete,
         .size = sizeof(struct tcf_skbmod),
 };

diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 8f09cf08d8fe..420759153d5f 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
         return tcf_idr_search(tn, a, index);
 }

-static int tunnel_key_delete(struct net *net, u32 index)
-{
-        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-        return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_tunnel_key_ops = {
         .kind = "tunnel_key",
         .type = TCA_ACT_TUNNEL_KEY,
@@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
         .cleanup = tunnel_key_release,
         .walk = tunnel_key_walker,
         .lookup = tunnel_key_search,
-        .delete = tunnel_key_delete,
         .size = sizeof(struct tcf_tunnel_key),
 };

diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 209e70ad2c09..033d273afe50 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c | |||
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index, | |||
296 | return tcf_idr_search(tn, a, index); | 296 | return tcf_idr_search(tn, a, index); |
297 | } | 297 | } |
298 | 298 | ||
299 | static int tcf_vlan_delete(struct net *net, u32 index) | ||
300 | { | ||
301 | struct tc_action_net *tn = net_generic(net, vlan_net_id); | ||
302 | |||
303 | return tcf_idr_delete_index(tn, index); | ||
304 | } | ||
305 | |||
306 | static struct tc_action_ops act_vlan_ops = { | 299 | static struct tc_action_ops act_vlan_ops = { |
307 | .kind = "vlan", | 300 | .kind = "vlan", |
308 | .type = TCA_ACT_VLAN, | 301 | .type = TCA_ACT_VLAN, |
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = { | |||
313 | .cleanup = tcf_vlan_cleanup, | 306 | .cleanup = tcf_vlan_cleanup, |
314 | .walk = tcf_vlan_walker, | 307 | .walk = tcf_vlan_walker, |
315 | .lookup = tcf_vlan_search, | 308 | .lookup = tcf_vlan_search, |
316 | .delete = tcf_vlan_delete, | ||
317 | .size = sizeof(struct tcf_vlan), | 309 | .size = sizeof(struct tcf_vlan), |
318 | }; | 310 | }; |
319 | 311 | ||
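Note: each of the .delete handlers removed in the act_* hunks above was an identical two-line wrapper that resolved its module's per-netns tc_action_net and called tcf_idr_delete_index() on it. Purely as an illustration of that duplication pattern, here is a minimal userspace C sketch with invented names (action_ops, idr_delete_index, generic_delete); it is not the kernel's actual act_api code path.

#include <stdio.h>

struct action_ops {
	const char *kind;
	int net_id;                            /* what each removed wrapper varied on */
	int (*delete)(int net_id, int index);  /* the per-type boilerplate being dropped */
};

/* stand-in for the common helper every wrapper called */
static int idr_delete_index(int net_id, int index)
{
	printf("delete action %d in per-netns table %d\n", index, net_id);
	return 0;
}

/* with the wrappers gone, common code can reach the helper itself */
static int generic_delete(const struct action_ops *ops, int index)
{
	return idr_delete_index(ops->net_id, index);
}

int main(void)
{
	struct action_ops skbedit = { .kind = "skbedit", .net_id = 7 };

	return generic_delete(&skbedit, 42);
}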
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d5d2a6dc3921..f218ccf1e2d9 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
914 | struct nlattr *opt = tca[TCA_OPTIONS]; | 914 | struct nlattr *opt = tca[TCA_OPTIONS]; |
915 | struct nlattr *tb[TCA_U32_MAX + 1]; | 915 | struct nlattr *tb[TCA_U32_MAX + 1]; |
916 | u32 htid, flags = 0; | 916 | u32 htid, flags = 0; |
917 | size_t sel_size; | ||
917 | int err; | 918 | int err; |
918 | #ifdef CONFIG_CLS_U32_PERF | 919 | #ifdef CONFIG_CLS_U32_PERF |
919 | size_t size; | 920 | size_t size; |
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
1076 | } | 1077 | } |
1077 | 1078 | ||
1078 | s = nla_data(tb[TCA_U32_SEL]); | 1079 | s = nla_data(tb[TCA_U32_SEL]); |
1080 | sel_size = struct_size(s, keys, s->nkeys); | ||
1081 | if (nla_len(tb[TCA_U32_SEL]) < sel_size) { | ||
1082 | err = -EINVAL; | ||
1083 | goto erridr; | ||
1084 | } | ||
1079 | 1085 | ||
1080 | n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); | 1086 | n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); |
1081 | if (n == NULL) { | 1087 | if (n == NULL) { |
1082 | err = -ENOBUFS; | 1088 | err = -ENOBUFS; |
1083 | goto erridr; | 1089 | goto erridr; |
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
1092 | } | 1098 | } |
1093 | #endif | 1099 | #endif |
1094 | 1100 | ||
1095 | memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); | 1101 | memcpy(&n->sel, s, sel_size); |
1096 | RCU_INIT_POINTER(n->ht_up, ht); | 1102 | RCU_INIT_POINTER(n->ht_up, ht); |
1097 | n->handle = handle; | 1103 | n->handle = handle; |
1098 | n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; | 1104 | n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; |
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 35fc7252187c..c07c30b916d5 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c | |||
@@ -64,7 +64,6 @@ | |||
64 | #include <linux/vmalloc.h> | 64 | #include <linux/vmalloc.h> |
65 | #include <linux/reciprocal_div.h> | 65 | #include <linux/reciprocal_div.h> |
66 | #include <net/netlink.h> | 66 | #include <net/netlink.h> |
67 | #include <linux/version.h> | ||
68 | #include <linux/if_vlan.h> | 67 | #include <linux/if_vlan.h> |
69 | #include <net/pkt_sched.h> | 68 | #include <net/pkt_sched.h> |
70 | #include <net/pkt_cls.h> | 69 | #include <net/pkt_cls.h> |
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode) | |||
621 | } | 620 | } |
622 | 621 | ||
623 | static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, | 622 | static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, |
624 | int flow_mode) | 623 | int flow_mode, u16 flow_override, u16 host_override) |
625 | { | 624 | { |
626 | u32 flow_hash = 0, srchost_hash, dsthost_hash; | 625 | u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0; |
627 | u16 reduced_hash, srchost_idx, dsthost_idx; | 626 | u16 reduced_hash, srchost_idx, dsthost_idx; |
628 | struct flow_keys keys, host_keys; | 627 | struct flow_keys keys, host_keys; |
629 | 628 | ||
630 | if (unlikely(flow_mode == CAKE_FLOW_NONE)) | 629 | if (unlikely(flow_mode == CAKE_FLOW_NONE)) |
631 | return 0; | 630 | return 0; |
632 | 631 | ||
632 | /* If both overrides are set we can skip packet dissection entirely */ | ||
633 | if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) && | ||
634 | (host_override || !(flow_mode & CAKE_FLOW_HOSTS))) | ||
635 | goto skip_hash; | ||
636 | |||
633 | skb_flow_dissect_flow_keys(skb, &keys, | 637 | skb_flow_dissect_flow_keys(skb, &keys, |
634 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); | 638 | FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); |
635 | 639 | ||
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, | |||
676 | if (flow_mode & CAKE_FLOW_FLOWS) | 680 | if (flow_mode & CAKE_FLOW_FLOWS) |
677 | flow_hash = flow_hash_from_keys(&keys); | 681 | flow_hash = flow_hash_from_keys(&keys); |
678 | 682 | ||
683 | skip_hash: | ||
684 | if (flow_override) | ||
685 | flow_hash = flow_override - 1; | ||
686 | if (host_override) { | ||
687 | dsthost_hash = host_override - 1; | ||
688 | srchost_hash = host_override - 1; | ||
689 | } | ||
690 | |||
679 | if (!(flow_mode & CAKE_FLOW_FLOWS)) { | 691 | if (!(flow_mode & CAKE_FLOW_FLOWS)) { |
680 | if (flow_mode & CAKE_FLOW_SRC_IP) | 692 | if (flow_mode & CAKE_FLOW_SRC_IP) |
681 | flow_hash ^= srchost_hash; | 693 | flow_hash ^= srchost_hash; |
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, | |||
1571 | struct cake_sched_data *q = qdisc_priv(sch); | 1583 | struct cake_sched_data *q = qdisc_priv(sch); |
1572 | struct tcf_proto *filter; | 1584 | struct tcf_proto *filter; |
1573 | struct tcf_result res; | 1585 | struct tcf_result res; |
1574 | u32 flow = 0; | 1586 | u16 flow = 0, host = 0; |
1575 | int result; | 1587 | int result; |
1576 | 1588 | ||
1577 | filter = rcu_dereference_bh(q->filter_list); | 1589 | filter = rcu_dereference_bh(q->filter_list); |
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, | |||
1595 | #endif | 1607 | #endif |
1596 | if (TC_H_MIN(res.classid) <= CAKE_QUEUES) | 1608 | if (TC_H_MIN(res.classid) <= CAKE_QUEUES) |
1597 | flow = TC_H_MIN(res.classid); | 1609 | flow = TC_H_MIN(res.classid); |
1610 | if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16)) | ||
1611 | host = TC_H_MAJ(res.classid) >> 16; | ||
1598 | } | 1612 | } |
1599 | hash: | 1613 | hash: |
1600 | *t = cake_select_tin(sch, skb); | 1614 | *t = cake_select_tin(sch, skb); |
1601 | return flow ?: cake_hash(*t, skb, flow_mode) + 1; | 1615 | return cake_hash(*t, skb, flow_mode, flow, host) + 1; |
1602 | } | 1616 | } |
1603 | 1617 | ||
1604 | static void cake_reconfigure(struct Qdisc *sch); | 1618 | static void cake_reconfigure(struct Qdisc *sch); |
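For reference, the cake_classify() change above starts honouring both halves of the tc classid: the minor number overrides the flow queue and the major number overrides the host bucket, with 0 meaning "no override" (cake_hash() then uses the value minus one on its skip_hash path). A small userspace sketch of just that decoding step, with TC_H_MAJ/TC_H_MIN reproduced locally rather than taken from the uapi header, and CAKE_QUEUES assumed to match the qdisc's 1024-queue value:

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ(h) ((h) & 0xFFFF0000U)
#define TC_H_MIN(h) ((h) & 0x0000FFFFU)

#define CAKE_QUEUES 1024                  /* assumed to match sch_cake */

/* Split a filter classid into the (flow, host) overrides passed to cake_hash();
 * 0 means "no override", otherwise the value minus one picks the queue/bucket.
 */
static void decode_classid(uint32_t classid, uint16_t *flow, uint16_t *host)
{
	*flow = 0;
	*host = 0;
	if (TC_H_MIN(classid) <= CAKE_QUEUES)
		*flow = TC_H_MIN(classid);
	if (TC_H_MAJ(classid) <= ((uint32_t)CAKE_QUEUES << 16))
		*host = TC_H_MAJ(classid) >> 16;
}

int main(void)
{
	uint16_t flow, host;

	decode_classid(0x00030005, &flow, &host); /* major 3 -> host, minor 5 -> flow */
	printf("flow=%u host=%u\n", flow, host);  /* prints: flow=5 host=3 */
	return 0;
}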
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 93c0c225ab34..180b6640e531 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c | |||
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk) | |||
213 | { | 213 | { |
214 | struct tls_context *ctx = tls_get_ctx(sk); | 214 | struct tls_context *ctx = tls_get_ctx(sk); |
215 | 215 | ||
216 | /* We are already sending pages, ignore notification */ | 216 | /* If in_tcp_sendpages, call the lower protocol write space handler |
217 | if (ctx->in_tcp_sendpages) | 217 | * to ensure we wake up any waiting operations there. For example |
218 | * if do_tcp_sendpages were to call sk_wait_event. | ||
219 | */ | ||
220 | if (ctx->in_tcp_sendpages) { | ||
221 | ctx->sk_write_space(sk); | ||
218 | return; | 222 | return; |
223 | } | ||
219 | 224 | ||
220 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { | 225 | if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { |
221 | gfp_t sk_allocation = sk->sk_allocation; | 226 | gfp_t sk_allocation = sk->sk_allocation; |
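The tls_write_space() fix above forwards the write-space event to the saved lower-protocol callback while a tcp_sendpages call is in flight instead of silently dropping it. A hedged userspace sketch of that callback-chaining shape, with invented names (sock_sketch, busy_sending) standing in for the socket and context fields:

#include <stdbool.h>
#include <stdio.h>

struct sock_sketch;
typedef void (*write_space_fn)(struct sock_sketch *sk);

struct sock_sketch {
	bool busy_sending;                /* stands in for ctx->in_tcp_sendpages */
	write_space_fn lower_write_space; /* saved lower-layer callback          */
};

static void tcp_write_space_sketch(struct sock_sketch *sk)
{
	(void)sk;
	printf("lower layer woken\n");    /* e.g. a blocked sk_wait_event() user */
}

static void tls_write_space_sketch(struct sock_sketch *sk)
{
	if (sk->busy_sending) {
		/* mid-sendpages: forward the event instead of swallowing it */
		sk->lower_write_space(sk);
		return;
	}
	printf("upper layer handles write space itself\n");
}

int main(void)
{
	struct sock_sketch sk = {
		.busy_sending = true,
		.lower_write_space = tcp_write_space_sketch,
	};

	tls_write_space_sketch(&sk);
	return 0;
}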
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 911ca6d3cb5a..bfe2dbea480b 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
74 | return 0; | 74 | return 0; |
75 | 75 | ||
76 | if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) | 76 | if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) |
77 | return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ | 77 | return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */ |
78 | 78 | ||
79 | bpf.command = XDP_QUERY_XSK_UMEM; | 79 | bpf.command = XDP_QUERY_XSK_UMEM; |
80 | 80 | ||
81 | rtnl_lock(); | 81 | rtnl_lock(); |
82 | err = xdp_umem_query(dev, queue_id); | 82 | err = xdp_umem_query(dev, queue_id); |
83 | if (err) { | 83 | if (err) { |
84 | err = err < 0 ? -ENOTSUPP : -EBUSY; | 84 | err = err < 0 ? -EOPNOTSUPP : -EBUSY; |
85 | goto err_rtnl_unlock; | 85 | goto err_rtnl_unlock; |
86 | } | 86 | } |
87 | 87 | ||
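The xdp_umem hunk swaps the kernel-internal ENOTSUPP (524), which userspace libcs cannot name, for the standard EOPNOTSUPP. A tiny sketch of the visible difference, assuming a glibc-style strerror() (exact wording may vary by libc):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* 524 is the kernel-internal ENOTSUPP value; libc has no name for it */
	printf("EOPNOTSUPP -> %s\n", strerror(EOPNOTSUPP));
	printf("524        -> %s\n", strerror(524));   /* "Unknown error 524" on glibc */
	return 0;
}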