Diffstat (limited to 'net')

 net/core/filter.c                            |  5
 net/ipv4/ip_output.c                         |  3
 net/ipv4/netfilter/ipt_MASQUERADE.c          |  7
 net/ipv4/netfilter/nf_nat_masquerade_ipv4.c  | 38
 net/ipv4/netfilter/nft_masq_ipv4.c           |  4
 net/ipv4/tcp_input.c                         | 16
 net/ipv4/tcp_timer.c                         | 10
 net/ipv6/ip6_output.c                        |  3
 net/ipv6/netfilter.c                         |  3
 net/ipv6/netfilter/ip6t_MASQUERADE.c         |  8
 net/ipv6/netfilter/nf_nat_masquerade_ipv6.c  | 49
 net/ipv6/netfilter/nft_masq_ipv6.c           |  4
 net/netfilter/ipvs/ip_vs_ctl.c               |  3
 net/netfilter/nf_conncount.c                 | 44
 net/netfilter/nf_conntrack_proto_gre.c       | 14
 net/netfilter/nf_tables_api.c                | 46
 net/netfilter/nfnetlink_cttimeout.c          | 15
 net/netfilter/nft_compat.c                   |  3
 net/netfilter/nft_flow_offload.c             |  5
 net/netfilter/xt_RATEEST.c                   | 10
 net/netfilter/xt_hashlimit.c                 |  9
 net/sctp/output.c                            |  1
 net/tipc/node.c                              |  7
23 files changed, 185 insertions, 122 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index e521c5ebc7d1..9a1327eb25fa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4852,18 +4852,17 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
 	} else {
 		struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
 		struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
-		u16 hnum = ntohs(tuple->ipv6.dport);
 		int sdif = inet6_sdif(skb);
 
 		if (proto == IPPROTO_TCP)
 			sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
 					    src6, tuple->ipv6.sport,
-					    dst6, hnum,
+					    dst6, ntohs(tuple->ipv6.dport),
 					    dif, sdif, &refcounted);
 		else if (likely(ipv6_bpf_stub))
 			sk = ipv6_bpf_stub->udp6_lib_lookup(net,
 							    src6, tuple->ipv6.sport,
-							    dst6, hnum,
+							    dst6, tuple->ipv6.dport,
 							    dif, sdif,
 							    &udp_table, skb);
 #endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c09219e7f230..5dbec21856f4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -939,7 +939,7 @@ static int __ip_append_data(struct sock *sk,
 			unsigned int fraglen;
 			unsigned int fraggap;
 			unsigned int alloclen;
-			unsigned int pagedlen = 0;
+			unsigned int pagedlen;
 			struct sk_buff *skb_prev;
 alloc_new_skb:
 			skb_prev = skb;
@@ -956,6 +956,7 @@ alloc_new_skb:
 			if (datalen > mtu - fragheaderlen)
 				datalen = maxfraglen - fragheaderlen;
 			fraglen = datalen + fragheaderlen;
+			pagedlen = 0;
 
 			if ((flags & MSG_MORE) &&
 			    !(rt->dst.dev->features&NETIF_F_SG))
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index ce1512b02cb2..fd3f9e8a74da 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
 	int ret;
 
 	ret = xt_register_target(&masquerade_tg_reg);
+	if (ret)
+		return ret;
 
-	if (ret == 0)
-		nf_nat_masquerade_ipv4_register_notifier();
+	ret = nf_nat_masquerade_ipv4_register_notifier();
+	if (ret)
+		xt_unregister_target(&masquerade_tg_reg);
 
 	return ret;
 }
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index a9d5e013e555..41327bb99093 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -147,28 +147,50 @@ static struct notifier_block masq_inet_notifier = {
 	.notifier_call	= masq_inet_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv4_register_notifier(void)
+int nf_nat_masquerade_ipv4_register_notifier(void)
 {
+	int ret = 0;
+
+	mutex_lock(&masq_mutex);
 	/* check if the notifier was already set */
-	if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-		return;
+	if (++masq_refcnt > 1)
+		goto out_unlock;
 
 	/* Register for device down reports */
-	register_netdevice_notifier(&masq_dev_notifier);
+	ret = register_netdevice_notifier(&masq_dev_notifier);
+	if (ret)
+		goto err_dec;
 	/* Register IP address change reports */
-	register_inetaddr_notifier(&masq_inet_notifier);
+	ret = register_inetaddr_notifier(&masq_inet_notifier);
+	if (ret)
+		goto err_unregister;
+
+	mutex_unlock(&masq_mutex);
+	return ret;
+
+err_unregister:
+	unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+	masq_refcnt--;
+out_unlock:
+	mutex_unlock(&masq_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
 
 void nf_nat_masquerade_ipv4_unregister_notifier(void)
 {
+	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-		return;
+	if (--masq_refcnt > 0)
+		goto out_unlock;
 
 	unregister_netdevice_notifier(&masq_dev_notifier);
 	unregister_inetaddr_notifier(&masq_inet_notifier);
+out_unlock:
+	mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index f1193e1e928a..6847de1d1db8 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
 	if (ret < 0)
 		return ret;
 
-	nf_nat_masquerade_ipv4_register_notifier();
+	ret = nf_nat_masquerade_ipv4_register_notifier();
+	if (ret)
+		nft_unregister_expr(&nft_masq_ipv4_type);
 
 	return ret;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1e37c1388189..a9d9555a973f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -579,10 +579,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
 		u32 delta_us;
 
-		if (!delta)
-			delta = 1;
-		delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
-		tcp_rcv_rtt_update(tp, delta_us, 0);
+		if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+			if (!delta)
+				delta = 1;
+			delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+			tcp_rcv_rtt_update(tp, delta_us, 0);
+		}
 	}
 }
 
@@ -2910,9 +2912,11 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 	    flag & FLAG_ACKED) {
 		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
-		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
 
-		seq_rtt_us = ca_rtt_us = delta_us;
+		if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+			seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+			ca_rtt_us = seq_rtt_us;
+		}
 	}
 	rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
 	if (seq_rtt_us < 0)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5f8b6d3cd855..091c53925e4d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -40,15 +40,17 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 elapsed, start_ts;
+	s32 remaining;
 
 	start_ts = tcp_retransmit_stamp(sk);
 	if (!icsk->icsk_user_timeout || !start_ts)
 		return icsk->icsk_rto;
 	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
-	if (elapsed >= icsk->icsk_user_timeout)
+	remaining = icsk->icsk_user_timeout - elapsed;
+	if (remaining <= 0)
 		return 1; /* user timeout has passed; fire ASAP */
-	else
-		return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed));
+
+	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 }
 
 /**
@@ -209,7 +211,7 @@ static bool retransmits_timed_out(struct sock *sk,
 			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 		timeout = jiffies_to_msecs(timeout);
 	}
-	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout;
+	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
 }
 
 /* A write timeout has occurred. Process the after effects. */
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89e0d5118afe..827a3f5ff3bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1354,7 +1354,7 @@ emsgsize:
 			unsigned int fraglen;
 			unsigned int fraggap;
 			unsigned int alloclen;
-			unsigned int pagedlen = 0;
+			unsigned int pagedlen;
 alloc_new_skb:
 			/* There's no room in the current skb */
 			if (skb)
@@ -1378,6 +1378,7 @@ alloc_new_skb:
 			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
 				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
 			fraglen = datalen + fragheaderlen;
+			pagedlen = 0;
 
 			if ((flags & MSG_MORE) &&
 			    !(rt->dst.dev->features&NETIF_F_SG))
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 5ae8e1c51079..8b075f0bc351 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 	unsigned int hh_len;
 	struct dst_entry *dst;
 	struct flowi6 fl6 = {
-		.flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
+		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
+			rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
 		.flowi6_mark = skb->mark,
 		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 491f808e356a..29c7f1915a96 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
 	int err;
 
 	err = xt_register_target(&masquerade_tg6_reg);
-	if (err == 0)
-		nf_nat_masquerade_ipv6_register_notifier();
+	if (err)
+		return err;
+
+	err = nf_nat_masquerade_ipv6_register_notifier();
+	if (err)
+		xt_unregister_target(&masquerade_tg6_reg);
 
 	return err;
 }
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 3e4bf2286abe..0ad0da5a2600 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -132,8 +132,8 @@ static void iterate_cleanup_work(struct work_struct *work)
  * of ipv6 addresses being deleted), we also need to add an upper
  * limit to the number of queued work items.
  */
-static int masq_inet_event(struct notifier_block *this,
-			   unsigned long event, void *ptr)
+static int masq_inet6_event(struct notifier_block *this,
+			    unsigned long event, void *ptr)
 {
 	struct inet6_ifaddr *ifa = ptr;
 	const struct net_device *dev;
@@ -171,30 +171,53 @@ static int masq_inet_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block masq_inet_notifier = {
-	.notifier_call	= masq_inet_event,
+static struct notifier_block masq_inet6_notifier = {
+	.notifier_call	= masq_inet6_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv6_register_notifier(void)
+int nf_nat_masquerade_ipv6_register_notifier(void)
 {
+	int ret = 0;
+
+	mutex_lock(&masq_mutex);
 	/* check if the notifier is already set */
-	if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-		return;
+	if (++masq_refcnt > 1)
+		goto out_unlock;
+
+	ret = register_netdevice_notifier(&masq_dev_notifier);
+	if (ret)
+		goto err_dec;
+
+	ret = register_inet6addr_notifier(&masq_inet6_notifier);
+	if (ret)
+		goto err_unregister;
 
-	register_netdevice_notifier(&masq_dev_notifier);
-	register_inet6addr_notifier(&masq_inet_notifier);
+	mutex_unlock(&masq_mutex);
+	return ret;
+
+err_unregister:
+	unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+	masq_refcnt--;
+out_unlock:
+	mutex_unlock(&masq_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
 
 void nf_nat_masquerade_ipv6_unregister_notifier(void)
 {
+	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-		return;
+	if (--masq_refcnt > 0)
+		goto out_unlock;
 
-	unregister_inet6addr_notifier(&masq_inet_notifier);
+	unregister_inet6addr_notifier(&masq_inet6_notifier);
 	unregister_netdevice_notifier(&masq_dev_notifier);
+out_unlock:
+	mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index dd0122f3cffe..e06c82e9dfcd 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
 	if (ret < 0)
 		return ret;
 
-	nf_nat_masquerade_ipv6_register_notifier();
+	ret = nf_nat_masquerade_ipv6_register_notifier();
+	if (ret)
+		nft_unregister_expr(&nft_masq_ipv6_type);
 
 	return ret;
 }
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 83395bf6dc35..432141f04af3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
 
 static struct notifier_block ip_vs_dst_notifier = {
 	.notifier_call = ip_vs_dst_event,
+#ifdef CONFIG_IP_VS_IPV6
+	.priority = ADDRCONF_NOTIFY_PRIORITY + 5,
+#endif
 };
 
 int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df793f5..b6d0f6deea86 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
 	struct nf_conntrack_zone	zone;
 	int				cpu;
 	u32				jiffies32;
+	bool				dead;
 	struct rcu_head			rcu_head;
 };
 
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
 	conn->zone = *zone;
 	conn->cpu = raw_smp_processor_id();
 	conn->jiffies32 = (u32)jiffies;
-	spin_lock(&list->list_lock);
+	conn->dead = false;
+	spin_lock_bh(&list->list_lock);
 	if (list->dead == true) {
 		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock(&list->list_lock);
+		spin_unlock_bh(&list->list_lock);
 		return NF_CONNCOUNT_SKIP;
 	}
 	list_add_tail(&conn->node, &list->head);
 	list->count++;
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	return NF_CONNCOUNT_ADDED;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
 {
 	bool free_entry = false;
 
-	spin_lock(&list->list_lock);
+	spin_lock_bh(&list->list_lock);
 
-	if (list->count == 0) {
-		spin_unlock(&list->list_lock);
+	if (conn->dead) {
+		spin_unlock_bh(&list->list_lock);
 		return free_entry;
 	}
 
 	list->count--;
+	conn->dead = true;
 	list_del_rcu(&conn->node);
-	if (list->count == 0)
+	if (list->count == 0) {
+		list->dead = true;
 		free_entry = true;
+	}
 
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	call_rcu(&conn->rcu_head, __conn_free);
 	return free_entry;
 }
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
 {
 	spin_lock_init(&list->list_lock);
 	INIT_LIST_HEAD(&list->head);
-	list->count = 1;
+	list->count = 0;
 	list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
 	struct nf_conn *found_ct;
 	unsigned int collected = 0;
 	bool free_entry = false;
+	bool ret = false;
 
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
 		found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
 		if (collected > CONNCOUNT_GC_MAX_NODES)
 			return false;
 	}
-	return false;
+
+	spin_lock_bh(&list->list_lock);
+	if (!list->count) {
+		list->dead = true;
+		ret = true;
+	}
+	spin_unlock_bh(&list->list_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
 
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
 	while (gc_count) {
 		rbconn = gc_nodes[--gc_count];
 		spin_lock(&rbconn->list.list_lock);
-		if (rbconn->list.count == 0 && rbconn->list.dead == false) {
-			rbconn->list.dead = true;
-			rb_erase(&rbconn->node, root);
-			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
-		}
+		rb_erase(&rbconn->node, root);
+		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
 		spin_unlock(&rbconn->list.list_lock);
 	}
 }
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
 	nf_conncount_list_init(&rbconn->list);
 	list_add(&conn->node, &rbconn->list.head);
 	count = 1;
+	rbconn->list.count = count;
 
 	rb_link_node(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9b48dc8b4b88..2a5e56c6d8d9 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,24 +43,12 @@
 #include <linux/netfilter/nf_conntrack_proto_gre.h>
 #include <linux/netfilter/nf_conntrack_pptp.h>
 
-enum grep_conntrack {
-	GRE_CT_UNREPLIED,
-	GRE_CT_REPLIED,
-	GRE_CT_MAX
-};
-
 static const unsigned int gre_timeouts[GRE_CT_MAX] = {
 	[GRE_CT_UNREPLIED]	= 30*HZ,
 	[GRE_CT_REPLIED]	= 180*HZ,
 };
 
 static unsigned int proto_gre_net_id __read_mostly;
-struct netns_proto_gre {
-	struct nf_proto_net	nf;
-	rwlock_t		keymap_lock;
-	struct list_head	keymap_list;
-	unsigned int		gre_timeouts[GRE_CT_MAX];
-};
 
 static inline struct netns_proto_gre *gre_pernet(struct net *net)
 {
@@ -402,6 +390,8 @@ static int __init nf_ct_proto_gre_init(void)
 {
 	int ret;
 
+	BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
+
 	ret = register_pernet_subsys(&proto_gre_net_ops);
 	if (ret < 0)
 		goto out_pernet;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 42487d01a3ed..2e61aab6ed73 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2457,7 +2457,7 @@ err:
 static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 				   struct nft_rule *rule)
 {
-	struct nft_expr *expr;
+	struct nft_expr *expr, *next;
 
 	/*
 	 * Careful: some expressions might not be initialized in case this
@@ -2465,8 +2465,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	 */
 	expr = nft_expr_first(rule);
 	while (expr != nft_expr_last(rule) && expr->ops) {
+		next = nft_expr_next(expr);
 		nf_tables_expr_destroy(ctx, expr);
-		expr = nft_expr_next(expr);
+		expr = next;
 	}
 	kfree(rule);
 }
@@ -2589,17 +2590,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 
 		if (chain->use == UINT_MAX)
 			return -EOVERFLOW;
-	}
-
-	if (nla[NFTA_RULE_POSITION]) {
-		if (!(nlh->nlmsg_flags & NLM_F_CREATE))
-			return -EOPNOTSUPP;
 
-		pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
-		old_rule = __nft_rule_lookup(chain, pos_handle);
-		if (IS_ERR(old_rule)) {
-			NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
-			return PTR_ERR(old_rule);
+		if (nla[NFTA_RULE_POSITION]) {
+			pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+			old_rule = __nft_rule_lookup(chain, pos_handle);
+			if (IS_ERR(old_rule)) {
+				NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
+				return PTR_ERR(old_rule);
+			}
 		}
 	}
 
@@ -2669,21 +2667,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 	}
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-		if (!nft_is_active_next(net, old_rule)) {
-			err = -ENOENT;
-			goto err2;
-		}
-		trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-					   old_rule);
+		trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
 		if (trans == NULL) {
 			err = -ENOMEM;
 			goto err2;
 		}
-		nft_deactivate_next(net, old_rule);
-		chain->use--;
-
-		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-			err = -ENOMEM;
+		err = nft_delrule(&ctx, old_rule);
+		if (err < 0) {
+			nft_trans_destroy(trans);
 			goto err2;
 		}
 
@@ -6324,7 +6315,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
 	call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
 }
 
-static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain)
+static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
 {
 	struct nft_rule **g0, **g1;
 	bool next_genbit;
@@ -6441,11 +6432,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
 	/* step 2. Make rules_gen_X visible to packet path */
 	list_for_each_entry(table, &net->nft.tables, list) {
-		list_for_each_entry(chain, &table->chains, list) {
-			if (!nft_is_active_next(net, chain))
-				continue;
-			nf_tables_commit_chain_active(net, chain);
-		}
+		list_for_each_entry(chain, &table->chains, list)
+			nf_tables_commit_chain(net, chain);
 	}
 
 	/*
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index a518eb162344..109b0d27345a 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -455,7 +455,8 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
 	case IPPROTO_TCP:
 		timeouts = nf_tcp_pernet(net)->timeouts;
 		break;
-	case IPPROTO_UDP:
+	case IPPROTO_UDP: /* fallthrough */
+	case IPPROTO_UDPLITE:
 		timeouts = nf_udp_pernet(net)->timeouts;
 		break;
 	case IPPROTO_DCCP:
@@ -471,11 +472,21 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
 		timeouts = nf_sctp_pernet(net)->timeouts;
 #endif
 		break;
+	case IPPROTO_GRE:
+#ifdef CONFIG_NF_CT_PROTO_GRE
+		if (l4proto->net_id) {
+			struct netns_proto_gre *net_gre;
+
+			net_gre = net_generic(net, *l4proto->net_id);
+			timeouts = net_gre->gre_timeouts;
+		}
+#endif
+		break;
 	case 255:
 		timeouts = &nf_generic_pernet(net)->timeout;
 		break;
 	default:
-		WARN_ON_ONCE(1);
+		WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
 		break;
 	}
 
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 9d0ede474224..7334e0b80a5e 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -520,6 +520,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		    void *info)
 {
 	struct xt_match *match = expr->ops->data;
+	struct module *me = match->me;
 	struct xt_mtdtor_param par;
 
 	par.net = ctx->net;
@@ -530,7 +531,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		par.match->destroy(&par);
 
 	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(match->me);
+		module_put(me);
 }
 
 static void
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index e82d9a966c45..974525eb92df 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
 {
 	int err;
 
-	register_netdevice_notifier(&flow_offload_netdev_notifier);
+	err = register_netdevice_notifier(&flow_offload_netdev_notifier);
+	if (err)
+		goto err;
 
 	err = nft_register_expr(&nft_flow_offload_type);
 	if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
 
 register_expr:
 	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+err:
 	return err;
 }
 
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dec843cadf46..9e05c86ba5c4 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
 	return 0;
 }
 
-static void __net_exit xt_rateest_net_exit(struct net *net)
-{
-	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
-		WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
-}
-
 static struct pernet_operations xt_rateest_net_ops = {
 	.init = xt_rateest_net_init,
-	.exit = xt_rateest_net_exit,
 	.id   = &xt_rateest_id,
 	.size = sizeof(struct xt_rateest_net),
 };
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259e5d8d..1ad4017f9b73 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 
 	/* copy match config into hashtable config */
 	ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
-
-	if (ret)
+	if (ret) {
+		vfree(hinfo);
 		return ret;
+	}
 
 	hinfo->cfg.size = size;
 	if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	int ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
 	if (ret)
 		return ret;
 
@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
 	int ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
 	if (ret)
 		return ret;
 
@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
 		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
 	if (ret)
 		return ret;
 
@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
 		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
 	if (ret)
 		return ret;
 
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b0e74a3e77ec..025f48e14a91 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -410,6 +410,7 @@ static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
 	head->truesize += skb->truesize;
 	head->data_len += skb->len;
 	head->len += skb->len;
+	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
 
 	__skb_header_release(skb);
 }
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2afc4f8c37a7..488019766433 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -584,12 +584,15 @@ static void tipc_node_clear_links(struct tipc_node *node)
 /* tipc_node_cleanup - delete nodes that does not
  * have active links for NODE_CLEANUP_AFTER time
  */
-static int tipc_node_cleanup(struct tipc_node *peer)
+static bool tipc_node_cleanup(struct tipc_node *peer)
 {
 	struct tipc_net *tn = tipc_net(peer->net);
 	bool deleted = false;
 
-	spin_lock_bh(&tn->node_list_lock);
+	/* If lock held by tipc_node_stop() the node will be deleted anyway */
+	if (!spin_trylock_bh(&tn->node_list_lock))
+		return false;
+
 	tipc_node_write_lock(peer);
 
 	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
