| field | value | date |
|---|---|---|
| author | Olof Johansson <olof@lixom.net> | 2014-11-04 23:37:25 -0500 |
| committer | Olof Johansson <olof@lixom.net> | 2014-11-04 23:37:25 -0500 |
| commit | 83b3d538db83fe37e24b46befa699a4ae8c496f2 (patch) | |
| tree | 71141d9e170e9f489db186c640ef2a3abf7f1c18 /net | |
| parent | 4257412db57900e43716d0b7ddd4f4a51e6ed2f4 (diff) | |
| parent | 89fbec5b97fbcf08db3a9cd93a340f21f95d38b8 (diff) | |
Merge tag 'imx-fixes-3.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux into fixes
Merge "ARM: imx: fixes for 3.18, 2nd round" from Shawn Guo:
"This is the second round of i.MX fixes for 3.18. The clk-vf610 fix is
relatively big, because it needs some adaption to the change made by
offending commit dc4805c2e78b (ARM: imx: remove ENABLE and BYPASS bits
from clk-pllv3 driver). And it should have been sent to you for earlier
-rc inclusion, but unfortunately it got delayed for some time because
Stefan wasn't aware of my email address change."
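To make the clk-pllv3 remark concrete: after dc4805c2e78b the ENABLE and BYPASS bits are managed inside the PLL clock driver, so a consumer gets the PLL powered up and taken out of bypass through the common clock API rather than through statically programmed register bits. The sketch below is a hypothetical consumer, not code from this series; the clock name "pll1_sys" and the 528 MHz rate are illustrative assumptions.

```c
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: with ENABLE/BYPASS handled inside clk-pllv3,
 * enabling the clock through the common clock framework is what powers
 * the PLL up and takes it off the bypass path.
 */
static int example_enable_pll(struct device *dev)
{
	struct clk *pll;
	int ret;

	pll = devm_clk_get(dev, "pll1_sys");	/* clock name is an assumption */
	if (IS_ERR(pll))
		return PTR_ERR(pll);

	ret = clk_set_rate(pll, 528000000);	/* illustrative target rate */
	if (ret)
		return ret;

	return clk_prepare_enable(pll);		/* driver handles ENABLE/BYPASS here */
}
```

The clk-vf610 patch in this pull works on the provider side (it defines the PLL clock tree); the point of the sketch is only that enable/bypass sequencing now happens behind these calls.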
The i.MX fixes for 3.18, 2nd round:
- Fix a regression on the Vybrid platform caused by commit dc4805c2e78b
  (ARM: imx: remove ENABLE and BYPASS bits from clk-pllv3 driver), which
  results in missing configuration of the PLL clocks.
- Fix a regression with the i.MX defconfig files where the CONFIG_SPI
  option accidentally gets lost (see the sketch after this list).
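A hedged sketch of the defconfig side of that fix: the exact entries touched by "ARM: imx: Fix the removal of CONFIG_SPI option" are not reproduced here, and the symbol list below is an assumption, but restoring SPI support in an i.MX defconfig amounts to keeping entries such as:

```
CONFIG_SPI=y
CONFIG_SPI_IMX=y
```

With CONFIG_SPI dropped, the i.MX SPI driver (and anything depending on it) can no longer be enabled, which is presumably the regression being fixed.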
* tag 'imx-fixes-3.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux: (460 commits)
ARM: imx: Fix the removal of CONFIG_SPI option
ARM: imx: clk-vf610: define PLL's clock tree
+ Linux 3.18-rc3
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'net')
55 files changed, 851 insertions, 308 deletions
diff --git a/net/Kconfig b/net/Kconfig index 6272420a721b..99815b5454bf 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
| @@ -6,7 +6,7 @@ menuconfig NET | |||
| 6 | bool "Networking support" | 6 | bool "Networking support" |
| 7 | select NLATTR | 7 | select NLATTR |
| 8 | select GENERIC_NET_UTILS | 8 | select GENERIC_NET_UTILS |
| 9 | select ANON_INODES | 9 | select BPF |
| 10 | ---help--- | 10 | ---help--- |
| 11 | Unless you really know what you are doing, you should say Y here. | 11 | Unless you really know what you are doing, you should say Y here. |
| 12 | The reason is that some programs need kernel networking support even | 12 | The reason is that some programs need kernel networking support even |
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 992ec49a96aa..44cb786b925a 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c | |||
| @@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) | |||
| 112 | 112 | ||
| 113 | kfree_skb(skb); | 113 | kfree_skb(skb); |
| 114 | } | 114 | } |
| 115 | EXPORT_SYMBOL_GPL(br_deliver); | ||
| 115 | 116 | ||
| 116 | /* called with rcu_read_lock */ | 117 | /* called with rcu_read_lock */ |
| 117 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) | 118 | void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 1bada53bb195..1a4f32c09ad5 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
| @@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) | |||
| 192 | 192 | ||
| 193 | static int br_parse_ip_options(struct sk_buff *skb) | 193 | static int br_parse_ip_options(struct sk_buff *skb) |
| 194 | { | 194 | { |
| 195 | struct ip_options *opt; | ||
| 196 | const struct iphdr *iph; | 195 | const struct iphdr *iph; |
| 197 | struct net_device *dev = skb->dev; | 196 | struct net_device *dev = skb->dev; |
| 198 | u32 len; | 197 | u32 len; |
| @@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb) | |||
| 201 | goto inhdr_error; | 200 | goto inhdr_error; |
| 202 | 201 | ||
| 203 | iph = ip_hdr(skb); | 202 | iph = ip_hdr(skb); |
| 204 | opt = &(IPCB(skb)->opt); | ||
| 205 | 203 | ||
| 206 | /* Basic sanity checks */ | 204 | /* Basic sanity checks */ |
| 207 | if (iph->ihl < 5 || iph->version != 4) | 205 | if (iph->ihl < 5 || iph->version != 4) |
| @@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb) | |||
| 227 | } | 225 | } |
| 228 | 226 | ||
| 229 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | 227 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
| 230 | if (iph->ihl == 5) | 228 | /* We should really parse IP options here but until |
| 231 | return 0; | 229 | * somebody who actually uses IP options complains to |
| 232 | 230 | * us we'll just silently ignore the options because | |
| 233 | opt->optlen = iph->ihl*4 - sizeof(struct iphdr); | 231 | * we're lazy! |
| 234 | if (ip_options_compile(dev_net(dev), opt, skb)) | 232 | */ |
| 235 | goto inhdr_error; | ||
| 236 | |||
| 237 | /* Check correct handling of SRR option */ | ||
| 238 | if (unlikely(opt->srr)) { | ||
| 239 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
| 240 | if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) | ||
| 241 | goto drop; | ||
| 242 | |||
| 243 | if (ip_options_rcv_srr(skb)) | ||
| 244 | goto drop; | ||
| 245 | } | ||
| 246 | |||
| 247 | return 0; | 233 | return 0; |
| 248 | 234 | ||
| 249 | inhdr_error: | 235 | inhdr_error: |
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index da17a5eab8b4..074c557ab505 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c | |||
| @@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = { | |||
| 75 | .type = NFT_CHAIN_T_DEFAULT, | 75 | .type = NFT_CHAIN_T_DEFAULT, |
| 76 | .family = NFPROTO_BRIDGE, | 76 | .family = NFPROTO_BRIDGE, |
| 77 | .owner = THIS_MODULE, | 77 | .owner = THIS_MODULE, |
| 78 | .hook_mask = (1 << NF_BR_LOCAL_IN) | | 78 | .hook_mask = (1 << NF_BR_PRE_ROUTING) | |
| 79 | (1 << NF_BR_LOCAL_IN) | | ||
| 79 | (1 << NF_BR_FORWARD) | | 80 | (1 << NF_BR_FORWARD) | |
| 80 | (1 << NF_BR_LOCAL_OUT), | 81 | (1 << NF_BR_LOCAL_OUT) | |
| 82 | (1 << NF_BR_POST_ROUTING), | ||
| 81 | }; | 83 | }; |
| 82 | 84 | ||
| 83 | static int __init nf_tables_bridge_init(void) | 85 | static int __init nf_tables_bridge_init(void) |
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index a76479535df2..654c9018e3e7 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c | |||
| @@ -16,6 +16,238 @@ | |||
| 16 | #include <net/netfilter/nft_reject.h> | 16 | #include <net/netfilter/nft_reject.h> |
| 17 | #include <net/netfilter/ipv4/nf_reject.h> | 17 | #include <net/netfilter/ipv4/nf_reject.h> |
| 18 | #include <net/netfilter/ipv6/nf_reject.h> | 18 | #include <net/netfilter/ipv6/nf_reject.h> |
| 19 | #include <linux/ip.h> | ||
| 20 | #include <net/ip.h> | ||
| 21 | #include <linux/netfilter_bridge.h> | ||
| 22 | #include "../br_private.h" | ||
| 23 | |||
| 24 | static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, | ||
| 25 | struct sk_buff *nskb) | ||
| 26 | { | ||
| 27 | struct ethhdr *eth; | ||
| 28 | |||
| 29 | eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN); | ||
| 30 | skb_reset_mac_header(nskb); | ||
| 31 | ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest); | ||
| 32 | ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); | ||
| 33 | eth->h_proto = eth_hdr(oldskb)->h_proto; | ||
| 34 | skb_pull(nskb, ETH_HLEN); | ||
| 35 | } | ||
| 36 | |||
| 37 | static int nft_reject_iphdr_validate(struct sk_buff *oldskb) | ||
| 38 | { | ||
| 39 | struct iphdr *iph; | ||
| 40 | u32 len; | ||
| 41 | |||
| 42 | if (!pskb_may_pull(oldskb, sizeof(struct iphdr))) | ||
| 43 | return 0; | ||
| 44 | |||
| 45 | iph = ip_hdr(oldskb); | ||
| 46 | if (iph->ihl < 5 || iph->version != 4) | ||
| 47 | return 0; | ||
| 48 | |||
| 49 | len = ntohs(iph->tot_len); | ||
| 50 | if (oldskb->len < len) | ||
| 51 | return 0; | ||
| 52 | else if (len < (iph->ihl*4)) | ||
| 53 | return 0; | ||
| 54 | |||
| 55 | if (!pskb_may_pull(oldskb, iph->ihl*4)) | ||
| 56 | return 0; | ||
| 57 | |||
| 58 | return 1; | ||
| 59 | } | ||
| 60 | |||
| 61 | static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) | ||
| 62 | { | ||
| 63 | struct sk_buff *nskb; | ||
| 64 | struct iphdr *niph; | ||
| 65 | const struct tcphdr *oth; | ||
| 66 | struct tcphdr _oth; | ||
| 67 | |||
| 68 | if (!nft_reject_iphdr_validate(oldskb)) | ||
| 69 | return; | ||
| 70 | |||
| 71 | oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); | ||
| 72 | if (!oth) | ||
| 73 | return; | ||
| 74 | |||
| 75 | nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + | ||
| 76 | LL_MAX_HEADER, GFP_ATOMIC); | ||
| 77 | if (!nskb) | ||
| 78 | return; | ||
| 79 | |||
| 80 | skb_reserve(nskb, LL_MAX_HEADER); | ||
| 81 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, | ||
| 82 | sysctl_ip_default_ttl); | ||
| 83 | nf_reject_ip_tcphdr_put(nskb, oldskb, oth); | ||
| 84 | niph->ttl = sysctl_ip_default_ttl; | ||
| 85 | niph->tot_len = htons(nskb->len); | ||
| 86 | ip_send_check(niph); | ||
| 87 | |||
| 88 | nft_reject_br_push_etherhdr(oldskb, nskb); | ||
| 89 | |||
| 90 | br_deliver(br_port_get_rcu(oldskb->dev), nskb); | ||
| 91 | } | ||
| 92 | |||
| 93 | static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, | ||
| 94 | u8 code) | ||
| 95 | { | ||
| 96 | struct sk_buff *nskb; | ||
| 97 | struct iphdr *niph; | ||
| 98 | struct icmphdr *icmph; | ||
| 99 | unsigned int len; | ||
| 100 | void *payload; | ||
| 101 | __wsum csum; | ||
| 102 | |||
| 103 | if (!nft_reject_iphdr_validate(oldskb)) | ||
| 104 | return; | ||
| 105 | |||
| 106 | /* IP header checks: fragment. */ | ||
| 107 | if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) | ||
| 108 | return; | ||
| 109 | |||
| 110 | /* RFC says return as much as we can without exceeding 576 bytes. */ | ||
| 111 | len = min_t(unsigned int, 536, oldskb->len); | ||
| 112 | |||
| 113 | if (!pskb_may_pull(oldskb, len)) | ||
| 114 | return; | ||
| 115 | |||
| 116 | if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0)) | ||
| 117 | return; | ||
| 118 | |||
| 119 | nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) + | ||
| 120 | LL_MAX_HEADER + len, GFP_ATOMIC); | ||
| 121 | if (!nskb) | ||
| 122 | return; | ||
| 123 | |||
| 124 | skb_reserve(nskb, LL_MAX_HEADER); | ||
| 125 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP, | ||
| 126 | sysctl_ip_default_ttl); | ||
| 127 | |||
| 128 | skb_reset_transport_header(nskb); | ||
| 129 | icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr)); | ||
| 130 | memset(icmph, 0, sizeof(*icmph)); | ||
| 131 | icmph->type = ICMP_DEST_UNREACH; | ||
| 132 | icmph->code = code; | ||
| 133 | |||
| 134 | payload = skb_put(nskb, len); | ||
| 135 | memcpy(payload, skb_network_header(oldskb), len); | ||
| 136 | |||
| 137 | csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0); | ||
| 138 | icmph->checksum = csum_fold(csum); | ||
| 139 | |||
| 140 | niph->tot_len = htons(nskb->len); | ||
| 141 | ip_send_check(niph); | ||
| 142 | |||
| 143 | nft_reject_br_push_etherhdr(oldskb, nskb); | ||
| 144 | |||
| 145 | br_deliver(br_port_get_rcu(oldskb->dev), nskb); | ||
| 146 | } | ||
| 147 | |||
| 148 | static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb) | ||
| 149 | { | ||
| 150 | struct ipv6hdr *hdr; | ||
| 151 | u32 pkt_len; | ||
| 152 | |||
| 153 | if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr))) | ||
| 154 | return 0; | ||
| 155 | |||
| 156 | hdr = ipv6_hdr(oldskb); | ||
| 157 | if (hdr->version != 6) | ||
| 158 | return 0; | ||
| 159 | |||
| 160 | pkt_len = ntohs(hdr->payload_len); | ||
| 161 | if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len) | ||
| 162 | return 0; | ||
| 163 | |||
| 164 | return 1; | ||
| 165 | } | ||
| 166 | |||
| 167 | static void nft_reject_br_send_v6_tcp_reset(struct net *net, | ||
| 168 | struct sk_buff *oldskb, int hook) | ||
| 169 | { | ||
| 170 | struct sk_buff *nskb; | ||
| 171 | const struct tcphdr *oth; | ||
| 172 | struct tcphdr _oth; | ||
| 173 | unsigned int otcplen; | ||
| 174 | struct ipv6hdr *nip6h; | ||
| 175 | |||
| 176 | if (!nft_reject_ip6hdr_validate(oldskb)) | ||
| 177 | return; | ||
| 178 | |||
| 179 | oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook); | ||
| 180 | if (!oth) | ||
| 181 | return; | ||
| 182 | |||
| 183 | nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + | ||
| 184 | LL_MAX_HEADER, GFP_ATOMIC); | ||
| 185 | if (!nskb) | ||
| 186 | return; | ||
| 187 | |||
| 188 | skb_reserve(nskb, LL_MAX_HEADER); | ||
| 189 | nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, | ||
| 190 | net->ipv6.devconf_all->hop_limit); | ||
| 191 | nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen); | ||
| 192 | nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); | ||
| 193 | |||
| 194 | nft_reject_br_push_etherhdr(oldskb, nskb); | ||
| 195 | |||
| 196 | br_deliver(br_port_get_rcu(oldskb->dev), nskb); | ||
| 197 | } | ||
| 198 | |||
| 199 | static void nft_reject_br_send_v6_unreach(struct net *net, | ||
| 200 | struct sk_buff *oldskb, int hook, | ||
| 201 | u8 code) | ||
| 202 | { | ||
| 203 | struct sk_buff *nskb; | ||
| 204 | struct ipv6hdr *nip6h; | ||
| 205 | struct icmp6hdr *icmp6h; | ||
| 206 | unsigned int len; | ||
| 207 | void *payload; | ||
| 208 | |||
| 209 | if (!nft_reject_ip6hdr_validate(oldskb)) | ||
| 210 | return; | ||
| 211 | |||
| 212 | /* Include "As much of invoking packet as possible without the ICMPv6 | ||
| 213 | * packet exceeding the minimum IPv6 MTU" in the ICMP payload. | ||
| 214 | */ | ||
| 215 | len = min_t(unsigned int, 1220, oldskb->len); | ||
| 216 | |||
| 217 | if (!pskb_may_pull(oldskb, len)) | ||
| 218 | return; | ||
| 219 | |||
| 220 | nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + | ||
| 221 | LL_MAX_HEADER + len, GFP_ATOMIC); | ||
| 222 | if (!nskb) | ||
| 223 | return; | ||
| 224 | |||
| 225 | skb_reserve(nskb, LL_MAX_HEADER); | ||
| 226 | nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6, | ||
| 227 | net->ipv6.devconf_all->hop_limit); | ||
| 228 | |||
| 229 | skb_reset_transport_header(nskb); | ||
| 230 | icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr)); | ||
| 231 | memset(icmp6h, 0, sizeof(*icmp6h)); | ||
| 232 | icmp6h->icmp6_type = ICMPV6_DEST_UNREACH; | ||
| 233 | icmp6h->icmp6_code = code; | ||
| 234 | |||
| 235 | payload = skb_put(nskb, len); | ||
| 236 | memcpy(payload, skb_network_header(oldskb), len); | ||
| 237 | nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); | ||
| 238 | |||
| 239 | icmp6h->icmp6_cksum = | ||
| 240 | csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, | ||
| 241 | nskb->len - sizeof(struct ipv6hdr), | ||
| 242 | IPPROTO_ICMPV6, | ||
| 243 | csum_partial(icmp6h, | ||
| 244 | nskb->len - sizeof(struct ipv6hdr), | ||
| 245 | 0)); | ||
| 246 | |||
| 247 | nft_reject_br_push_etherhdr(oldskb, nskb); | ||
| 248 | |||
| 249 | br_deliver(br_port_get_rcu(oldskb->dev), nskb); | ||
| 250 | } | ||
| 19 | 251 | ||
| 20 | static void nft_reject_bridge_eval(const struct nft_expr *expr, | 252 | static void nft_reject_bridge_eval(const struct nft_expr *expr, |
| 21 | struct nft_data data[NFT_REG_MAX + 1], | 253 | struct nft_data data[NFT_REG_MAX + 1], |
| @@ -23,35 +255,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, | |||
| 23 | { | 255 | { |
| 24 | struct nft_reject *priv = nft_expr_priv(expr); | 256 | struct nft_reject *priv = nft_expr_priv(expr); |
| 25 | struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); | 257 | struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); |
| 258 | const unsigned char *dest = eth_hdr(pkt->skb)->h_dest; | ||
| 259 | |||
| 260 | if (is_broadcast_ether_addr(dest) || | ||
| 261 | is_multicast_ether_addr(dest)) | ||
| 262 | goto out; | ||
| 26 | 263 | ||
| 27 | switch (eth_hdr(pkt->skb)->h_proto) { | 264 | switch (eth_hdr(pkt->skb)->h_proto) { |
| 28 | case htons(ETH_P_IP): | 265 | case htons(ETH_P_IP): |
| 29 | switch (priv->type) { | 266 | switch (priv->type) { |
| 30 | case NFT_REJECT_ICMP_UNREACH: | 267 | case NFT_REJECT_ICMP_UNREACH: |
| 31 | nf_send_unreach(pkt->skb, priv->icmp_code); | 268 | nft_reject_br_send_v4_unreach(pkt->skb, |
| 269 | pkt->ops->hooknum, | ||
| 270 | priv->icmp_code); | ||
| 32 | break; | 271 | break; |
| 33 | case NFT_REJECT_TCP_RST: | 272 | case NFT_REJECT_TCP_RST: |
| 34 | nf_send_reset(pkt->skb, pkt->ops->hooknum); | 273 | nft_reject_br_send_v4_tcp_reset(pkt->skb, |
| 274 | pkt->ops->hooknum); | ||
| 35 | break; | 275 | break; |
| 36 | case NFT_REJECT_ICMPX_UNREACH: | 276 | case NFT_REJECT_ICMPX_UNREACH: |
| 37 | nf_send_unreach(pkt->skb, | 277 | nft_reject_br_send_v4_unreach(pkt->skb, |
| 38 | nft_reject_icmp_code(priv->icmp_code)); | 278 | pkt->ops->hooknum, |
| 279 | nft_reject_icmp_code(priv->icmp_code)); | ||
| 39 | break; | 280 | break; |
| 40 | } | 281 | } |
| 41 | break; | 282 | break; |
| 42 | case htons(ETH_P_IPV6): | 283 | case htons(ETH_P_IPV6): |
| 43 | switch (priv->type) { | 284 | switch (priv->type) { |
| 44 | case NFT_REJECT_ICMP_UNREACH: | 285 | case NFT_REJECT_ICMP_UNREACH: |
| 45 | nf_send_unreach6(net, pkt->skb, priv->icmp_code, | 286 | nft_reject_br_send_v6_unreach(net, pkt->skb, |
| 46 | pkt->ops->hooknum); | 287 | pkt->ops->hooknum, |
| 288 | priv->icmp_code); | ||
| 47 | break; | 289 | break; |
| 48 | case NFT_REJECT_TCP_RST: | 290 | case NFT_REJECT_TCP_RST: |
| 49 | nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); | 291 | nft_reject_br_send_v6_tcp_reset(net, pkt->skb, |
| 292 | pkt->ops->hooknum); | ||
| 50 | break; | 293 | break; |
| 51 | case NFT_REJECT_ICMPX_UNREACH: | 294 | case NFT_REJECT_ICMPX_UNREACH: |
| 52 | nf_send_unreach6(net, pkt->skb, | 295 | nft_reject_br_send_v6_unreach(net, pkt->skb, |
| 53 | nft_reject_icmpv6_code(priv->icmp_code), | 296 | pkt->ops->hooknum, |
| 54 | pkt->ops->hooknum); | 297 | nft_reject_icmpv6_code(priv->icmp_code)); |
| 55 | break; | 298 | break; |
| 56 | } | 299 | } |
| 57 | break; | 300 | break; |
| @@ -59,15 +302,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, | |||
| 59 | /* No explicit way to reject this protocol, drop it. */ | 302 | /* No explicit way to reject this protocol, drop it. */ |
| 60 | break; | 303 | break; |
| 61 | } | 304 | } |
| 305 | out: | ||
| 62 | data[NFT_REG_VERDICT].verdict = NF_DROP; | 306 | data[NFT_REG_VERDICT].verdict = NF_DROP; |
| 63 | } | 307 | } |
| 64 | 308 | ||
| 309 | static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) | ||
| 310 | { | ||
| 311 | struct nft_base_chain *basechain; | ||
| 312 | |||
| 313 | if (chain->flags & NFT_BASE_CHAIN) { | ||
| 314 | basechain = nft_base_chain(chain); | ||
| 315 | |||
| 316 | switch (basechain->ops[0].hooknum) { | ||
| 317 | case NF_BR_PRE_ROUTING: | ||
| 318 | case NF_BR_LOCAL_IN: | ||
| 319 | break; | ||
| 320 | default: | ||
| 321 | return -EOPNOTSUPP; | ||
| 322 | } | ||
| 323 | } | ||
| 324 | return 0; | ||
| 325 | } | ||
| 326 | |||
| 65 | static int nft_reject_bridge_init(const struct nft_ctx *ctx, | 327 | static int nft_reject_bridge_init(const struct nft_ctx *ctx, |
| 66 | const struct nft_expr *expr, | 328 | const struct nft_expr *expr, |
| 67 | const struct nlattr * const tb[]) | 329 | const struct nlattr * const tb[]) |
| 68 | { | 330 | { |
| 69 | struct nft_reject *priv = nft_expr_priv(expr); | 331 | struct nft_reject *priv = nft_expr_priv(expr); |
| 70 | int icmp_code; | 332 | int icmp_code, err; |
| 333 | |||
| 334 | err = nft_reject_bridge_validate_hooks(ctx->chain); | ||
| 335 | if (err < 0) | ||
| 336 | return err; | ||
| 71 | 337 | ||
| 72 | if (tb[NFTA_REJECT_TYPE] == NULL) | 338 | if (tb[NFTA_REJECT_TYPE] == NULL) |
| 73 | return -EINVAL; | 339 | return -EINVAL; |
| @@ -116,6 +382,13 @@ nla_put_failure: | |||
| 116 | return -1; | 382 | return -1; |
| 117 | } | 383 | } |
| 118 | 384 | ||
| 385 | static int nft_reject_bridge_validate(const struct nft_ctx *ctx, | ||
| 386 | const struct nft_expr *expr, | ||
| 387 | const struct nft_data **data) | ||
| 388 | { | ||
| 389 | return nft_reject_bridge_validate_hooks(ctx->chain); | ||
| 390 | } | ||
| 391 | |||
| 119 | static struct nft_expr_type nft_reject_bridge_type; | 392 | static struct nft_expr_type nft_reject_bridge_type; |
| 120 | static const struct nft_expr_ops nft_reject_bridge_ops = { | 393 | static const struct nft_expr_ops nft_reject_bridge_ops = { |
| 121 | .type = &nft_reject_bridge_type, | 394 | .type = &nft_reject_bridge_type, |
| @@ -123,6 +396,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = { | |||
| 123 | .eval = nft_reject_bridge_eval, | 396 | .eval = nft_reject_bridge_eval, |
| 124 | .init = nft_reject_bridge_init, | 397 | .init = nft_reject_bridge_init, |
| 125 | .dump = nft_reject_bridge_dump, | 398 | .dump = nft_reject_bridge_dump, |
| 399 | .validate = nft_reject_bridge_validate, | ||
| 126 | }; | 400 | }; |
| 127 | 401 | ||
| 128 | static struct nft_expr_type nft_reject_bridge_type __read_mostly = { | 402 | static struct nft_expr_type nft_reject_bridge_type __read_mostly = { |
diff --git a/net/core/dev.c b/net/core/dev.c index b793e3521a36..945bbd001359 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -4157,6 +4157,10 @@ EXPORT_SYMBOL(napi_gro_receive); | |||
| 4157 | 4157 | ||
| 4158 | static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) | 4158 | static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) |
| 4159 | { | 4159 | { |
| 4160 | if (unlikely(skb->pfmemalloc)) { | ||
| 4161 | consume_skb(skb); | ||
| 4162 | return; | ||
| 4163 | } | ||
| 4160 | __skb_pull(skb, skb_headlen(skb)); | 4164 | __skb_pull(skb, skb_headlen(skb)); |
| 4161 | /* restore the reserve we had after netdev_alloc_skb_ip_align() */ | 4165 | /* restore the reserve we had after netdev_alloc_skb_ip_align() */ |
| 4162 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); | 4166 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 1600aa24d36b..06dfb293e5aa 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
| @@ -1036,7 +1036,8 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) | |||
| 1036 | { | 1036 | { |
| 1037 | const struct ethtool_ops *ops = dev->ethtool_ops; | 1037 | const struct ethtool_ops *ops = dev->ethtool_ops; |
| 1038 | 1038 | ||
| 1039 | if (!ops->get_eeprom || !ops->get_eeprom_len) | 1039 | if (!ops->get_eeprom || !ops->get_eeprom_len || |
| 1040 | !ops->get_eeprom_len(dev)) | ||
| 1040 | return -EOPNOTSUPP; | 1041 | return -EOPNOTSUPP; |
| 1041 | 1042 | ||
| 1042 | return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, | 1043 | return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, |
| @@ -1052,7 +1053,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) | |||
| 1052 | u8 *data; | 1053 | u8 *data; |
| 1053 | int ret = 0; | 1054 | int ret = 0; |
| 1054 | 1055 | ||
| 1055 | if (!ops->set_eeprom || !ops->get_eeprom_len) | 1056 | if (!ops->set_eeprom || !ops->get_eeprom_len || |
| 1057 | !ops->get_eeprom_len(dev)) | ||
| 1056 | return -EOPNOTSUPP; | 1058 | return -EOPNOTSUPP; |
| 1057 | 1059 | ||
| 1058 | if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) | 1060 | if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 61059a05ec95..c16615bfb61e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -4070,15 +4070,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); | |||
| 4070 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) | 4070 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
| 4071 | { | 4071 | { |
| 4072 | const struct skb_shared_info *shinfo = skb_shinfo(skb); | 4072 | const struct skb_shared_info *shinfo = skb_shinfo(skb); |
| 4073 | unsigned int thlen = 0; | ||
| 4073 | 4074 | ||
| 4074 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) | 4075 | if (skb->encapsulation) { |
| 4075 | return tcp_hdrlen(skb) + shinfo->gso_size; | 4076 | thlen = skb_inner_transport_header(skb) - |
| 4077 | skb_transport_header(skb); | ||
| 4076 | 4078 | ||
| 4079 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) | ||
| 4080 | thlen += inner_tcp_hdrlen(skb); | ||
| 4081 | } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { | ||
| 4082 | thlen = tcp_hdrlen(skb); | ||
| 4083 | } | ||
| 4077 | /* UFO sets gso_size to the size of the fragmentation | 4084 | /* UFO sets gso_size to the size of the fragmentation |
| 4078 | * payload, i.e. the size of the L4 (UDP) header is already | 4085 | * payload, i.e. the size of the L4 (UDP) header is already |
| 4079 | * accounted for. | 4086 | * accounted for. |
| 4080 | */ | 4087 | */ |
| 4081 | return shinfo->gso_size; | 4088 | return thlen + shinfo->gso_size; |
| 4082 | } | 4089 | } |
| 4083 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); | 4090 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); |
| 4084 | 4091 | ||
diff --git a/net/core/tso.c b/net/core/tso.c index 8c3203c585b0..630b30b4fb53 100644 --- a/net/core/tso.c +++ b/net/core/tso.c | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
| 2 | #include <net/ip.h> | 2 | #include <net/ip.h> |
| 3 | #include <net/tso.h> | 3 | #include <net/tso.h> |
| 4 | #include <asm/unaligned.h> | ||
| 4 | 5 | ||
| 5 | /* Calculate expected number of TX descriptors */ | 6 | /* Calculate expected number of TX descriptors */ |
| 6 | int tso_count_descs(struct sk_buff *skb) | 7 | int tso_count_descs(struct sk_buff *skb) |
| @@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, | |||
| 23 | iph->id = htons(tso->ip_id); | 24 | iph->id = htons(tso->ip_id); |
| 24 | iph->tot_len = htons(size + hdr_len - mac_hdr_len); | 25 | iph->tot_len = htons(size + hdr_len - mac_hdr_len); |
| 25 | tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); | 26 | tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); |
| 26 | tcph->seq = htonl(tso->tcp_seq); | 27 | put_unaligned_be32(tso->tcp_seq, &tcph->seq); |
| 27 | tso->ip_id++; | 28 | tso->ip_id++; |
| 28 | 29 | ||
| 29 | if (!is_last) { | 30 | if (!is_last) { |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 22f34cf4cb27..6317b41c99b0 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
| @@ -174,8 +174,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, | |||
| 174 | dst->rcv = brcm_netdev_ops.rcv; | 174 | dst->rcv = brcm_netdev_ops.rcv; |
| 175 | break; | 175 | break; |
| 176 | #endif | 176 | #endif |
| 177 | default: | 177 | case DSA_TAG_PROTO_NONE: |
| 178 | break; | 178 | break; |
| 179 | default: | ||
| 180 | ret = -ENOPROTOOPT; | ||
| 181 | goto out; | ||
| 179 | } | 182 | } |
| 180 | 183 | ||
| 181 | dst->tag_protocol = drv->tag_protocol; | 184 | dst->tag_protocol = drv->tag_protocol; |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 92db7a69f2b9..8b7fe5b03906 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
| 1246 | 1246 | ||
| 1247 | encap = SKB_GSO_CB(skb)->encap_level > 0; | 1247 | encap = SKB_GSO_CB(skb)->encap_level > 0; |
| 1248 | if (encap) | 1248 | if (encap) |
| 1249 | features = skb->dev->hw_enc_features & netif_skb_features(skb); | 1249 | features &= skb->dev->hw_enc_features; |
| 1250 | SKB_GSO_CB(skb)->encap_level += ihl; | 1250 | SKB_GSO_CB(skb)->encap_level += ihl; |
| 1251 | 1251 | ||
| 1252 | skb_reset_transport_header(skb); | 1252 | skb_reset_transport_header(skb); |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index ccda09628de7..bb5947b0ce2d 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
| @@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
| 47 | 47 | ||
| 48 | greh = (struct gre_base_hdr *)skb_transport_header(skb); | 48 | greh = (struct gre_base_hdr *)skb_transport_header(skb); |
| 49 | 49 | ||
| 50 | ghl = skb_inner_network_header(skb) - skb_transport_header(skb); | 50 | ghl = skb_inner_mac_header(skb) - skb_transport_header(skb); |
| 51 | if (unlikely(ghl < sizeof(*greh))) | 51 | if (unlikely(ghl < sizeof(*greh))) |
| 52 | goto out; | 52 | goto out; |
| 53 | 53 | ||
| @@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
| 68 | skb->mac_len = skb_inner_network_offset(skb); | 68 | skb->mac_len = skb_inner_network_offset(skb); |
| 69 | 69 | ||
| 70 | /* segment inner packet. */ | 70 | /* segment inner packet. */ |
| 71 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 71 | enc_features = skb->dev->hw_enc_features & features; |
| 72 | segs = skb_mac_gso_segment(skb, enc_features); | 72 | segs = skb_mac_gso_segment(skb, enc_features); |
| 73 | if (IS_ERR_OR_NULL(segs)) { | 73 | if (IS_ERR_OR_NULL(segs)) { |
| 74 | skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); | 74 | skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 9eb89f3f0ee4..19419b60cb37 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
| @@ -146,7 +146,6 @@ evict_again: | |||
| 146 | atomic_inc(&fq->refcnt); | 146 | atomic_inc(&fq->refcnt); |
| 147 | spin_unlock(&hb->chain_lock); | 147 | spin_unlock(&hb->chain_lock); |
| 148 | del_timer_sync(&fq->timer); | 148 | del_timer_sync(&fq->timer); |
| 149 | WARN_ON(atomic_read(&fq->refcnt) != 1); | ||
| 150 | inet_frag_put(fq, f); | 149 | inet_frag_put(fq, f); |
| 151 | goto evict_again; | 150 | goto evict_again; |
| 152 | } | 151 | } |
| @@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) | |||
| 285 | struct inet_frag_bucket *hb; | 284 | struct inet_frag_bucket *hb; |
| 286 | 285 | ||
| 287 | hb = get_frag_bucket_locked(fq, f); | 286 | hb = get_frag_bucket_locked(fq, f); |
| 288 | hlist_del(&fq->list); | 287 | if (!(fq->flags & INET_FRAG_EVICTED)) |
| 288 | hlist_del(&fq->list); | ||
| 289 | spin_unlock(&hb->chain_lock); | 289 | spin_unlock(&hb->chain_lock); |
| 290 | } | 290 | } |
| 291 | 291 | ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 88e5ef2c7f51..bc6471d4abcd 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
| @@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb) | |||
| 231 | */ | 231 | */ |
| 232 | features = netif_skb_features(skb); | 232 | features = netif_skb_features(skb); |
| 233 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); | 233 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); |
| 234 | if (IS_ERR(segs)) { | 234 | if (IS_ERR_OR_NULL(segs)) { |
| 235 | kfree_skb(skb); | 235 | kfree_skb(skb); |
| 236 | return -ENOMEM; | 236 | return -ENOMEM; |
| 237 | } | 237 | } |
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index b023b4eb1a96..1baaa83dfe5c 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c | |||
| @@ -6,48 +6,45 @@ | |||
| 6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #include <linux/module.h> | ||
| 9 | #include <net/ip.h> | 10 | #include <net/ip.h> |
| 10 | #include <net/tcp.h> | 11 | #include <net/tcp.h> |
| 11 | #include <net/route.h> | 12 | #include <net/route.h> |
| 12 | #include <net/dst.h> | 13 | #include <net/dst.h> |
| 13 | #include <linux/netfilter_ipv4.h> | 14 | #include <linux/netfilter_ipv4.h> |
| 15 | #include <net/netfilter/ipv4/nf_reject.h> | ||
| 14 | 16 | ||
| 15 | /* Send RST reply */ | 17 | const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, |
| 16 | void nf_send_reset(struct sk_buff *oldskb, int hook) | 18 | struct tcphdr *_oth, int hook) |
| 17 | { | 19 | { |
| 18 | struct sk_buff *nskb; | ||
| 19 | const struct iphdr *oiph; | ||
| 20 | struct iphdr *niph; | ||
| 21 | const struct tcphdr *oth; | 20 | const struct tcphdr *oth; |
| 22 | struct tcphdr _otcph, *tcph; | ||
| 23 | 21 | ||
| 24 | /* IP header checks: fragment. */ | 22 | /* IP header checks: fragment. */ |
| 25 | if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) | 23 | if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) |
| 26 | return; | 24 | return NULL; |
| 27 | 25 | ||
| 28 | oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), | 26 | oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), |
| 29 | sizeof(_otcph), &_otcph); | 27 | sizeof(struct tcphdr), _oth); |
| 30 | if (oth == NULL) | 28 | if (oth == NULL) |
| 31 | return; | 29 | return NULL; |
| 32 | 30 | ||
| 33 | /* No RST for RST. */ | 31 | /* No RST for RST. */ |
| 34 | if (oth->rst) | 32 | if (oth->rst) |
| 35 | return; | 33 | return NULL; |
| 36 | |||
| 37 | if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | ||
| 38 | return; | ||
| 39 | 34 | ||
| 40 | /* Check checksum */ | 35 | /* Check checksum */ |
| 41 | if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) | 36 | if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) |
| 42 | return; | 37 | return NULL; |
| 43 | oiph = ip_hdr(oldskb); | ||
| 44 | 38 | ||
| 45 | nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + | 39 | return oth; |
| 46 | LL_MAX_HEADER, GFP_ATOMIC); | 40 | } |
| 47 | if (!nskb) | 41 | EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get); |
| 48 | return; | ||
| 49 | 42 | ||
| 50 | skb_reserve(nskb, LL_MAX_HEADER); | 43 | struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb, |
| 44 | const struct sk_buff *oldskb, | ||
| 45 | __be16 protocol, int ttl) | ||
| 46 | { | ||
| 47 | struct iphdr *niph, *oiph = ip_hdr(oldskb); | ||
| 51 | 48 | ||
| 52 | skb_reset_network_header(nskb); | 49 | skb_reset_network_header(nskb); |
| 53 | niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); | 50 | niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); |
| @@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) | |||
| 56 | niph->tos = 0; | 53 | niph->tos = 0; |
| 57 | niph->id = 0; | 54 | niph->id = 0; |
| 58 | niph->frag_off = htons(IP_DF); | 55 | niph->frag_off = htons(IP_DF); |
| 59 | niph->protocol = IPPROTO_TCP; | 56 | niph->protocol = protocol; |
| 60 | niph->check = 0; | 57 | niph->check = 0; |
| 61 | niph->saddr = oiph->daddr; | 58 | niph->saddr = oiph->daddr; |
| 62 | niph->daddr = oiph->saddr; | 59 | niph->daddr = oiph->saddr; |
| 60 | niph->ttl = ttl; | ||
| 61 | |||
| 62 | nskb->protocol = htons(ETH_P_IP); | ||
| 63 | |||
| 64 | return niph; | ||
| 65 | } | ||
| 66 | EXPORT_SYMBOL_GPL(nf_reject_iphdr_put); | ||
| 67 | |||
| 68 | void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, | ||
| 69 | const struct tcphdr *oth) | ||
| 70 | { | ||
| 71 | struct iphdr *niph = ip_hdr(nskb); | ||
| 72 | struct tcphdr *tcph; | ||
| 63 | 73 | ||
| 64 | skb_reset_transport_header(nskb); | 74 | skb_reset_transport_header(nskb); |
| 65 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); | 75 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); |
| @@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) | |||
| 68 | tcph->dest = oth->source; | 78 | tcph->dest = oth->source; |
| 69 | tcph->doff = sizeof(struct tcphdr) / 4; | 79 | tcph->doff = sizeof(struct tcphdr) / 4; |
| 70 | 80 | ||
| 71 | if (oth->ack) | 81 | if (oth->ack) { |
| 72 | tcph->seq = oth->ack_seq; | 82 | tcph->seq = oth->ack_seq; |
| 73 | else { | 83 | } else { |
| 74 | tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + | 84 | tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + |
| 75 | oldskb->len - ip_hdrlen(oldskb) - | 85 | oldskb->len - ip_hdrlen(oldskb) - |
| 76 | (oth->doff << 2)); | 86 | (oth->doff << 2)); |
| @@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) | |||
| 83 | nskb->ip_summed = CHECKSUM_PARTIAL; | 93 | nskb->ip_summed = CHECKSUM_PARTIAL; |
| 84 | nskb->csum_start = (unsigned char *)tcph - nskb->head; | 94 | nskb->csum_start = (unsigned char *)tcph - nskb->head; |
| 85 | nskb->csum_offset = offsetof(struct tcphdr, check); | 95 | nskb->csum_offset = offsetof(struct tcphdr, check); |
| 96 | } | ||
| 97 | EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put); | ||
| 98 | |||
| 99 | /* Send RST reply */ | ||
| 100 | void nf_send_reset(struct sk_buff *oldskb, int hook) | ||
| 101 | { | ||
| 102 | struct sk_buff *nskb; | ||
| 103 | const struct iphdr *oiph; | ||
| 104 | struct iphdr *niph; | ||
| 105 | const struct tcphdr *oth; | ||
| 106 | struct tcphdr _oth; | ||
| 107 | |||
| 108 | oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); | ||
| 109 | if (!oth) | ||
| 110 | return; | ||
| 111 | |||
| 112 | if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | ||
| 113 | return; | ||
| 114 | |||
| 115 | oiph = ip_hdr(oldskb); | ||
| 116 | |||
| 117 | nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + | ||
| 118 | LL_MAX_HEADER, GFP_ATOMIC); | ||
| 119 | if (!nskb) | ||
| 120 | return; | ||
| 86 | 121 | ||
| 87 | /* ip_route_me_harder expects skb->dst to be set */ | 122 | /* ip_route_me_harder expects skb->dst to be set */ |
| 88 | skb_dst_set_noref(nskb, skb_dst(oldskb)); | 123 | skb_dst_set_noref(nskb, skb_dst(oldskb)); |
| 89 | 124 | ||
| 90 | nskb->protocol = htons(ETH_P_IP); | 125 | skb_reserve(nskb, LL_MAX_HEADER); |
| 126 | niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, | ||
| 127 | ip4_dst_hoplimit(skb_dst(nskb))); | ||
| 128 | nf_reject_ip_tcphdr_put(nskb, oldskb, oth); | ||
| 129 | |||
| 91 | if (ip_route_me_harder(nskb, RTN_UNSPEC)) | 130 | if (ip_route_me_harder(nskb, RTN_UNSPEC)) |
| 92 | goto free_nskb; | 131 | goto free_nskb; |
| 93 | 132 | ||
| 94 | niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); | ||
| 95 | |||
| 96 | /* "Never happens" */ | 133 | /* "Never happens" */ |
| 97 | if (nskb->len > dst_mtu(skb_dst(nskb))) | 134 | if (nskb->len > dst_mtu(skb_dst(nskb))) |
| 98 | goto free_nskb; | 135 | goto free_nskb; |
| @@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) | |||
| 125 | kfree_skb(nskb); | 162 | kfree_skb(nskb); |
| 126 | } | 163 | } |
| 127 | EXPORT_SYMBOL_GPL(nf_send_reset); | 164 | EXPORT_SYMBOL_GPL(nf_send_reset); |
| 165 | |||
| 166 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index 1c636d6b5b50..c1023c445920 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c | |||
| @@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = { | |||
| 39 | .eval = nft_masq_ipv4_eval, | 39 | .eval = nft_masq_ipv4_eval, |
| 40 | .init = nft_masq_init, | 40 | .init = nft_masq_init, |
| 41 | .dump = nft_masq_dump, | 41 | .dump = nft_masq_dump, |
| 42 | .validate = nft_masq_validate, | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { | 45 | static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2d4ae469b471..6a2155b02602 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -1798,6 +1798,7 @@ local_input: | |||
| 1798 | no_route: | 1798 | no_route: |
| 1799 | RT_CACHE_STAT_INC(in_no_route); | 1799 | RT_CACHE_STAT_INC(in_no_route); |
| 1800 | res.type = RTN_UNREACHABLE; | 1800 | res.type = RTN_UNREACHABLE; |
| 1801 | res.fi = NULL; | ||
| 1801 | goto local_input; | 1802 | goto local_input; |
| 1802 | 1803 | ||
| 1803 | /* | 1804 | /* |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1bec4e76d88c..39ec0c379545 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt); | |||
| 2868 | #endif | 2868 | #endif |
| 2869 | 2869 | ||
| 2870 | #ifdef CONFIG_TCP_MD5SIG | 2870 | #ifdef CONFIG_TCP_MD5SIG |
| 2871 | static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; | 2871 | static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); |
| 2872 | static DEFINE_MUTEX(tcp_md5sig_mutex); | 2872 | static DEFINE_MUTEX(tcp_md5sig_mutex); |
| 2873 | 2873 | static bool tcp_md5sig_pool_populated = false; | |
| 2874 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) | ||
| 2875 | { | ||
| 2876 | int cpu; | ||
| 2877 | |||
| 2878 | for_each_possible_cpu(cpu) { | ||
| 2879 | struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); | ||
| 2880 | |||
| 2881 | if (p->md5_desc.tfm) | ||
| 2882 | crypto_free_hash(p->md5_desc.tfm); | ||
| 2883 | } | ||
| 2884 | free_percpu(pool); | ||
| 2885 | } | ||
| 2886 | 2874 | ||
| 2887 | static void __tcp_alloc_md5sig_pool(void) | 2875 | static void __tcp_alloc_md5sig_pool(void) |
| 2888 | { | 2876 | { |
| 2889 | int cpu; | 2877 | int cpu; |
| 2890 | struct tcp_md5sig_pool __percpu *pool; | ||
| 2891 | |||
| 2892 | pool = alloc_percpu(struct tcp_md5sig_pool); | ||
| 2893 | if (!pool) | ||
| 2894 | return; | ||
| 2895 | 2878 | ||
| 2896 | for_each_possible_cpu(cpu) { | 2879 | for_each_possible_cpu(cpu) { |
| 2897 | struct crypto_hash *hash; | 2880 | if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) { |
| 2898 | 2881 | struct crypto_hash *hash; | |
| 2899 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | ||
| 2900 | if (IS_ERR_OR_NULL(hash)) | ||
| 2901 | goto out_free; | ||
| 2902 | 2882 | ||
| 2903 | per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; | 2883 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); |
| 2884 | if (IS_ERR_OR_NULL(hash)) | ||
| 2885 | return; | ||
| 2886 | per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; | ||
| 2887 | } | ||
| 2904 | } | 2888 | } |
| 2905 | /* before setting tcp_md5sig_pool, we must commit all writes | 2889 | /* before setting tcp_md5sig_pool_populated, we must commit all writes |
| 2906 | * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() | 2890 | * to memory. See smp_rmb() in tcp_get_md5sig_pool() |
| 2907 | */ | 2891 | */ |
| 2908 | smp_wmb(); | 2892 | smp_wmb(); |
| 2909 | tcp_md5sig_pool = pool; | 2893 | tcp_md5sig_pool_populated = true; |
| 2910 | return; | ||
| 2911 | out_free: | ||
| 2912 | __tcp_free_md5sig_pool(pool); | ||
| 2913 | } | 2894 | } |
| 2914 | 2895 | ||
| 2915 | bool tcp_alloc_md5sig_pool(void) | 2896 | bool tcp_alloc_md5sig_pool(void) |
| 2916 | { | 2897 | { |
| 2917 | if (unlikely(!tcp_md5sig_pool)) { | 2898 | if (unlikely(!tcp_md5sig_pool_populated)) { |
| 2918 | mutex_lock(&tcp_md5sig_mutex); | 2899 | mutex_lock(&tcp_md5sig_mutex); |
| 2919 | 2900 | ||
| 2920 | if (!tcp_md5sig_pool) | 2901 | if (!tcp_md5sig_pool_populated) |
| 2921 | __tcp_alloc_md5sig_pool(); | 2902 | __tcp_alloc_md5sig_pool(); |
| 2922 | 2903 | ||
| 2923 | mutex_unlock(&tcp_md5sig_mutex); | 2904 | mutex_unlock(&tcp_md5sig_mutex); |
| 2924 | } | 2905 | } |
| 2925 | return tcp_md5sig_pool != NULL; | 2906 | return tcp_md5sig_pool_populated; |
| 2926 | } | 2907 | } |
| 2927 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | 2908 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
| 2928 | 2909 | ||
| @@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | |||
| 2936 | */ | 2917 | */ |
| 2937 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | 2918 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) |
| 2938 | { | 2919 | { |
| 2939 | struct tcp_md5sig_pool __percpu *p; | ||
| 2940 | |||
| 2941 | local_bh_disable(); | 2920 | local_bh_disable(); |
| 2942 | p = ACCESS_ONCE(tcp_md5sig_pool); | ||
| 2943 | if (p) | ||
| 2944 | return raw_cpu_ptr(p); | ||
| 2945 | 2921 | ||
| 2922 | if (tcp_md5sig_pool_populated) { | ||
| 2923 | /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ | ||
| 2924 | smp_rmb(); | ||
| 2925 | return this_cpu_ptr(&tcp_md5sig_pool); | ||
| 2926 | } | ||
| 2946 | local_bh_enable(); | 2927 | local_bh_enable(); |
| 2947 | return NULL; | 2928 | return NULL; |
| 2948 | } | 2929 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 94d1a7757ff7..9c7d7621466b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
| 206 | inet->inet_dport = usin->sin_port; | 206 | inet->inet_dport = usin->sin_port; |
| 207 | inet->inet_daddr = daddr; | 207 | inet->inet_daddr = daddr; |
| 208 | 208 | ||
| 209 | inet_set_txhash(sk); | ||
| 210 | |||
| 211 | inet_csk(sk)->icsk_ext_hdr_len = 0; | 209 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
| 212 | if (inet_opt) | 210 | if (inet_opt) |
| 213 | inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; | 211 | inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; |
| @@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
| 224 | if (err) | 222 | if (err) |
| 225 | goto failure; | 223 | goto failure; |
| 226 | 224 | ||
| 225 | inet_set_txhash(sk); | ||
| 226 | |||
| 227 | rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, | 227 | rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, |
| 228 | inet->inet_sport, inet->inet_dport, sk); | 228 | inet->inet_sport, inet->inet_dport, sk); |
| 229 | if (IS_ERR(rt)) { | 229 | if (IS_ERR(rt)) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3af21296d967..a3d453b94747 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
| 2126 | static bool skb_still_in_host_queue(const struct sock *sk, | 2126 | static bool skb_still_in_host_queue(const struct sock *sk, |
| 2127 | const struct sk_buff *skb) | 2127 | const struct sk_buff *skb) |
| 2128 | { | 2128 | { |
| 2129 | if (unlikely(skb_fclone_busy(skb))) { | 2129 | if (unlikely(skb_fclone_busy(sk, skb))) { |
| 2130 | NET_INC_STATS_BH(sock_net(sk), | 2130 | NET_INC_STATS_BH(sock_net(sk), |
| 2131 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); | 2131 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); |
| 2132 | return true; | 2132 | return true; |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 507310ef4b56..6480cea7aa53 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
| @@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, | |||
| 58 | skb->encap_hdr_csum = 1; | 58 | skb->encap_hdr_csum = 1; |
| 59 | 59 | ||
| 60 | /* segment inner packet. */ | 60 | /* segment inner packet. */ |
| 61 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 61 | enc_features = skb->dev->hw_enc_features & features; |
| 62 | segs = gso_inner_segment(skb, enc_features); | 62 | segs = gso_inner_segment(skb, enc_features); |
| 63 | if (IS_ERR_OR_NULL(segs)) { | 63 | if (IS_ERR_OR_NULL(segs)) { |
| 64 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, | 64 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 725c763270a0..0169ccf5aa4f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -4531,6 +4531,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) | |||
| 4531 | } | 4531 | } |
| 4532 | 4532 | ||
| 4533 | write_unlock_bh(&idev->lock); | 4533 | write_unlock_bh(&idev->lock); |
| 4534 | inet6_ifinfo_notify(RTM_NEWLINK, idev); | ||
| 4534 | addrconf_verify_rtnl(); | 4535 | addrconf_verify_rtnl(); |
| 4535 | return 0; | 4536 | return 0; |
| 4536 | } | 4537 | } |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 91014d32488d..a071563a7e6e 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
| @@ -90,7 +90,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
| 90 | 90 | ||
| 91 | encap = SKB_GSO_CB(skb)->encap_level > 0; | 91 | encap = SKB_GSO_CB(skb)->encap_level > 0; |
| 92 | if (encap) | 92 | if (encap) |
| 93 | features = skb->dev->hw_enc_features & netif_skb_features(skb); | 93 | features &= skb->dev->hw_enc_features; |
| 94 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); | 94 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); |
| 95 | 95 | ||
| 96 | ipv6h = ipv6_hdr(skb); | 96 | ipv6h = ipv6_hdr(skb); |
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 5f5f0438d74d..015eb8a80766 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c | |||
| @@ -5,121 +5,109 @@ | |||
| 5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
| 6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
| 7 | */ | 7 | */ |
| 8 | |||
| 9 | #include <linux/module.h> | ||
| 8 | #include <net/ipv6.h> | 10 | #include <net/ipv6.h> |
| 9 | #include <net/ip6_route.h> | 11 | #include <net/ip6_route.h> |
| 10 | #include <net/ip6_fib.h> | 12 | #include <net/ip6_fib.h> |
| 11 | #include <net/ip6_checksum.h> | 13 | #include <net/ip6_checksum.h> |
| 12 | #include <linux/netfilter_ipv6.h> | 14 | #include <linux/netfilter_ipv6.h> |
| 15 | #include <net/netfilter/ipv6/nf_reject.h> | ||
| 13 | 16 | ||
| 14 | void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | 17 | const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, |
| 18 | struct tcphdr *otcph, | ||
| 19 | unsigned int *otcplen, int hook) | ||
| 15 | { | 20 | { |
| 16 | struct sk_buff *nskb; | ||
| 17 | struct tcphdr otcph, *tcph; | ||
| 18 | unsigned int otcplen, hh_len; | ||
| 19 | int tcphoff, needs_ack; | ||
| 20 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | 21 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); |
| 21 | struct ipv6hdr *ip6h; | ||
| 22 | #define DEFAULT_TOS_VALUE 0x0U | ||
| 23 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
| 24 | struct dst_entry *dst = NULL; | ||
| 25 | u8 proto; | 22 | u8 proto; |
| 26 | __be16 frag_off; | 23 | __be16 frag_off; |
| 27 | struct flowi6 fl6; | 24 | int tcphoff; |
| 28 | |||
| 29 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | ||
| 30 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | ||
| 31 | pr_debug("addr is not unicast.\n"); | ||
| 32 | return; | ||
| 33 | } | ||
| 34 | 25 | ||
| 35 | proto = oip6h->nexthdr; | 26 | proto = oip6h->nexthdr; |
| 36 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); | 27 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), |
| 28 | &proto, &frag_off); | ||
| 37 | 29 | ||
| 38 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { | 30 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { |
| 39 | pr_debug("Cannot get TCP header.\n"); | 31 | pr_debug("Cannot get TCP header.\n"); |
| 40 | return; | 32 | return NULL; |
| 41 | } | 33 | } |
| 42 | 34 | ||
| 43 | otcplen = oldskb->len - tcphoff; | 35 | *otcplen = oldskb->len - tcphoff; |
| 44 | 36 | ||
| 45 | /* IP header checks: fragment, too short. */ | 37 | /* IP header checks: fragment, too short. */ |
| 46 | if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { | 38 | if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) { |
| 47 | pr_debug("proto(%d) != IPPROTO_TCP, " | 39 | pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n", |
| 48 | "or too short. otcplen = %d\n", | 40 | proto, *otcplen); |
| 49 | proto, otcplen); | 41 | return NULL; |
| 50 | return; | ||
| 51 | } | 42 | } |
| 52 | 43 | ||
| 53 | if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) | 44 | otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr), |
| 54 | BUG(); | 45 | otcph); |
| 46 | if (otcph == NULL) | ||
| 47 | return NULL; | ||
| 55 | 48 | ||
| 56 | /* No RST for RST. */ | 49 | /* No RST for RST. */ |
| 57 | if (otcph.rst) { | 50 | if (otcph->rst) { |
| 58 | pr_debug("RST is set\n"); | 51 | pr_debug("RST is set\n"); |
| 59 | return; | 52 | return NULL; |
| 60 | } | 53 | } |
| 61 | 54 | ||
| 62 | /* Check checksum. */ | 55 | /* Check checksum. */ |
| 63 | if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { | 56 | if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { |
| 64 | pr_debug("TCP checksum is invalid\n"); | 57 | pr_debug("TCP checksum is invalid\n"); |
| 65 | return; | 58 | return NULL; |
| 66 | } | 59 | } |
| 67 | 60 | ||
| 68 | memset(&fl6, 0, sizeof(fl6)); | 61 | return otcph; |
| 69 | fl6.flowi6_proto = IPPROTO_TCP; | 62 | } |
| 70 | fl6.saddr = oip6h->daddr; | 63 | EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get); |
| 71 | fl6.daddr = oip6h->saddr; | ||
| 72 | fl6.fl6_sport = otcph.dest; | ||
| 73 | fl6.fl6_dport = otcph.source; | ||
| 74 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | ||
| 75 | dst = ip6_route_output(net, NULL, &fl6); | ||
| 76 | if (dst == NULL || dst->error) { | ||
| 77 | dst_release(dst); | ||
| 78 | return; | ||
| 79 | } | ||
| 80 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); | ||
| 81 | if (IS_ERR(dst)) | ||
| 82 | return; | ||
| 83 | |||
| 84 | hh_len = (dst->dev->hard_header_len + 15)&~15; | ||
| 85 | nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) | ||
| 86 | + sizeof(struct tcphdr) + dst->trailer_len, | ||
| 87 | GFP_ATOMIC); | ||
| 88 | |||
| 89 | if (!nskb) { | ||
| 90 | net_dbg_ratelimited("cannot alloc skb\n"); | ||
| 91 | dst_release(dst); | ||
| 92 | return; | ||
| 93 | } | ||
| 94 | |||
| 95 | skb_dst_set(nskb, dst); | ||
| 96 | 64 | ||
| 97 | skb_reserve(nskb, hh_len + dst->header_len); | 65 | struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, |
| 66 | const struct sk_buff *oldskb, | ||
| 67 | __be16 protocol, int hoplimit) | ||
| 68 | { | ||
| 69 | struct ipv6hdr *ip6h; | ||
| 70 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | ||
| 71 | #define DEFAULT_TOS_VALUE 0x0U | ||
| 72 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
| 98 | 73 | ||
| 99 | skb_put(nskb, sizeof(struct ipv6hdr)); | 74 | skb_put(nskb, sizeof(struct ipv6hdr)); |
| 100 | skb_reset_network_header(nskb); | 75 | skb_reset_network_header(nskb); |
| 101 | ip6h = ipv6_hdr(nskb); | 76 | ip6h = ipv6_hdr(nskb); |
| 102 | ip6_flow_hdr(ip6h, tclass, 0); | 77 | ip6_flow_hdr(ip6h, tclass, 0); |
| 103 | ip6h->hop_limit = ip6_dst_hoplimit(dst); | 78 | ip6h->hop_limit = hoplimit; |
| 104 | ip6h->nexthdr = IPPROTO_TCP; | 79 | ip6h->nexthdr = protocol; |
| 105 | ip6h->saddr = oip6h->daddr; | 80 | ip6h->saddr = oip6h->daddr; |
| 106 | ip6h->daddr = oip6h->saddr; | 81 | ip6h->daddr = oip6h->saddr; |
| 107 | 82 | ||
| 83 | nskb->protocol = htons(ETH_P_IPV6); | ||
| 84 | |||
| 85 | return ip6h; | ||
| 86 | } | ||
| 87 | EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put); | ||
| 88 | |||
| 89 | void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, | ||
| 90 | const struct sk_buff *oldskb, | ||
| 91 | const struct tcphdr *oth, unsigned int otcplen) | ||
| 92 | { | ||
| 93 | struct tcphdr *tcph; | ||
| 94 | int needs_ack; | ||
| 95 | |||
| 108 | skb_reset_transport_header(nskb); | 96 | skb_reset_transport_header(nskb); |
| 109 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); | 97 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); |
| 110 | /* Truncate to length (no data) */ | 98 | /* Truncate to length (no data) */ |
| 111 | tcph->doff = sizeof(struct tcphdr)/4; | 99 | tcph->doff = sizeof(struct tcphdr)/4; |
| 112 | tcph->source = otcph.dest; | 100 | tcph->source = oth->dest; |
| 113 | tcph->dest = otcph.source; | 101 | tcph->dest = oth->source; |
| 114 | 102 | ||
| 115 | if (otcph.ack) { | 103 | if (oth->ack) { |
| 116 | needs_ack = 0; | 104 | needs_ack = 0; |
| 117 | tcph->seq = otcph.ack_seq; | 105 | tcph->seq = oth->ack_seq; |
| 118 | tcph->ack_seq = 0; | 106 | tcph->ack_seq = 0; |
| 119 | } else { | 107 | } else { |
| 120 | needs_ack = 1; | 108 | needs_ack = 1; |
| 121 | tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin | 109 | tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + |
| 122 | + otcplen - (otcph.doff<<2)); | 110 | otcplen - (oth->doff<<2)); |
| 123 | tcph->seq = 0; | 111 | tcph->seq = 0; |
| 124 | } | 112 | } |
| 125 | 113 | ||
| @@ -137,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
| 137 | sizeof(struct tcphdr), IPPROTO_TCP, | 125 | sizeof(struct tcphdr), IPPROTO_TCP, |
| 138 | csum_partial(tcph, | 126 | csum_partial(tcph, |
| 139 | sizeof(struct tcphdr), 0)); | 127 | sizeof(struct tcphdr), 0)); |
| 128 | } | ||
| 129 | EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put); | ||
| 130 | |||
| 131 | void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | ||
| 132 | { | ||
| 133 | struct sk_buff *nskb; | ||
| 134 | struct tcphdr _otcph; | ||
| 135 | const struct tcphdr *otcph; | ||
| 136 | unsigned int otcplen, hh_len; | ||
| 137 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | ||
| 138 | struct ipv6hdr *ip6h; | ||
| 139 | struct dst_entry *dst = NULL; | ||
| 140 | struct flowi6 fl6; | ||
| 141 | |||
| 142 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | ||
| 143 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | ||
| 144 | pr_debug("addr is not unicast.\n"); | ||
| 145 | return; | ||
| 146 | } | ||
| 147 | |||
| 148 | otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook); | ||
| 149 | if (!otcph) | ||
| 150 | return; | ||
| 151 | |||
| 152 | memset(&fl6, 0, sizeof(fl6)); | ||
| 153 | fl6.flowi6_proto = IPPROTO_TCP; | ||
| 154 | fl6.saddr = oip6h->daddr; | ||
| 155 | fl6.daddr = oip6h->saddr; | ||
| 156 | fl6.fl6_sport = otcph->dest; | ||
| 157 | fl6.fl6_dport = otcph->source; | ||
| 158 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | ||
| 159 | dst = ip6_route_output(net, NULL, &fl6); | ||
| 160 | if (dst == NULL || dst->error) { | ||
| 161 | dst_release(dst); | ||
| 162 | return; | ||
| 163 | } | ||
| 164 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); | ||
| 165 | if (IS_ERR(dst)) | ||
| 166 | return; | ||
| 167 | |||
| 168 | hh_len = (dst->dev->hard_header_len + 15)&~15; | ||
| 169 | nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) | ||
| 170 | + sizeof(struct tcphdr) + dst->trailer_len, | ||
| 171 | GFP_ATOMIC); | ||
| 172 | |||
| 173 | if (!nskb) { | ||
| 174 | net_dbg_ratelimited("cannot alloc skb\n"); | ||
| 175 | dst_release(dst); | ||
| 176 | return; | ||
| 177 | } | ||
| 178 | |||
| 179 | skb_dst_set(nskb, dst); | ||
| 180 | |||
| 181 | skb_reserve(nskb, hh_len + dst->header_len); | ||
| 182 | ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, | ||
| 183 | ip6_dst_hoplimit(dst)); | ||
| 184 | nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen); | ||
| 140 | 185 | ||
| 141 | nf_ct_attach(nskb, oldskb); | 186 | nf_ct_attach(nskb, oldskb); |
| 142 | 187 | ||
| @@ -161,3 +206,5 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
| 161 | ip6_local_out(nskb); | 206 | ip6_local_out(nskb); |
| 162 | } | 207 | } |
| 163 | EXPORT_SYMBOL_GPL(nf_send_reset6); | 208 | EXPORT_SYMBOL_GPL(nf_send_reset6); |
| 209 | |||
| 210 | MODULE_LICENSE("GPL"); | ||
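The hunks above split the IPv6 REJECT path into reusable helpers: nf_reject_ip6hdr_put() builds the reply IPv6 header with source and destination swapped, nf_reject_ip6_tcphdr_put() appends the RST/ACK TCP header and checksums it, and nf_send_reset6() now just composes the two after its route lookup. A minimal sketch of that composition, mirroring the tail of nf_send_reset6() shown above (header includes and error handling omitted; this is not an additional kernel function):

```c
/* Sketch only: assumes dst, otcph/otcplen, hh_len and the freshly
 * allocated nskb were prepared exactly as in nf_send_reset6() above.
 */
static void compose_reset6_sketch(struct sk_buff *nskb,
				  const struct sk_buff *oldskb,
				  struct dst_entry *dst,
				  const struct tcphdr *otcph,
				  unsigned int otcplen,
				  unsigned int hh_len)
{
	skb_dst_set(nskb, dst);
	skb_reserve(nskb, hh_len + dst->header_len);

	/* IPv6 header: saddr/daddr swapped, hop limit taken from the route. */
	nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, ip6_dst_hoplimit(dst));

	/* TCP header: RST (or RST/ACK) with seq/ack derived from oldskb. */
	nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
}
```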
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 556262f40761..8a7ac685076d 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c | |||
| @@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = { | |||
| 39 | .eval = nft_masq_ipv6_eval, | 39 | .eval = nft_masq_ipv6_eval, |
| 40 | .init = nft_masq_init, | 40 | .init = nft_masq_init, |
| 41 | .dump = nft_masq_dump, | 41 | .dump = nft_masq_dump, |
| 42 | .validate = nft_masq_validate, | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { | 45 | static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { |
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index fc24c390af05..97f41a3e68d9 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c | |||
| @@ -3,11 +3,45 @@ | |||
| 3 | * not configured or static. These functions are needed by GSO/GRO implementation. | 3 | * not configured or static. These functions are needed by GSO/GRO implementation. |
| 4 | */ | 4 | */ |
| 5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
| 6 | #include <net/ip.h> | ||
| 6 | #include <net/ipv6.h> | 7 | #include <net/ipv6.h> |
| 7 | #include <net/ip6_fib.h> | 8 | #include <net/ip6_fib.h> |
| 8 | #include <net/addrconf.h> | 9 | #include <net/addrconf.h> |
| 9 | #include <net/secure_seq.h> | 10 | #include <net/secure_seq.h> |
| 10 | 11 | ||
| 12 | /* This function exists only for tap drivers that must support broken | ||
| 13 | * clients requesting UFO without specifying an IPv6 fragment ID. | ||
| 14 | * | ||
| 15 | * This is similar to ipv6_select_ident() but we use an independent hash | ||
| 16 | * seed to limit information leakage. | ||
| 17 | * | ||
| 18 | * The network header must be set before calling this. | ||
| 19 | */ | ||
| 20 | void ipv6_proxy_select_ident(struct sk_buff *skb) | ||
| 21 | { | ||
| 22 | static u32 ip6_proxy_idents_hashrnd __read_mostly; | ||
| 23 | struct in6_addr buf[2]; | ||
| 24 | struct in6_addr *addrs; | ||
| 25 | u32 hash, id; | ||
| 26 | |||
| 27 | addrs = skb_header_pointer(skb, | ||
| 28 | skb_network_offset(skb) + | ||
| 29 | offsetof(struct ipv6hdr, saddr), | ||
| 30 | sizeof(buf), buf); | ||
| 31 | if (!addrs) | ||
| 32 | return; | ||
| 33 | |||
| 34 | net_get_random_once(&ip6_proxy_idents_hashrnd, | ||
| 35 | sizeof(ip6_proxy_idents_hashrnd)); | ||
| 36 | |||
| 37 | hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); | ||
| 38 | hash = __ipv6_addr_jhash(&addrs[0], hash); | ||
| 39 | |||
| 40 | id = ip_idents_reserve(hash, 1); | ||
| 41 | skb_shinfo(skb)->ip6_frag_id = htonl(id); | ||
| 42 | } | ||
| 43 | EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); | ||
| 44 | |||
| 11 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | 45 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) |
| 12 | { | 46 | { |
| 13 | u16 offset = sizeof(struct ipv6hdr); | 47 | u16 offset = sizeof(struct ipv6hdr); |
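The new ipv6_proxy_select_ident() gives tap-style drivers a way to pick a UFO fragment ID when the client left it unset; it hashes the source and destination addresses with a seed independent of ipv6_select_ident() to limit information leakage, and per the comment the network header must already be valid. A hypothetical caller might look like the following sketch (the surrounding checks and the function name are illustrative, not taken from this patch set; only the call itself and the ip6_frag_id side effect come from the hunk above):

```c
/* Hypothetical tap/UFO transmit-path snippet. */
static void maybe_fill_ip6_frag_id(struct sk_buff *skb)
{
	if (skb->protocol != htons(ETH_P_IPV6))
		return;

	if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP) &&
	    !skb_shinfo(skb)->ip6_frag_id)
		ipv6_proxy_select_ident(skb);	/* fills skb_shinfo(skb)->ip6_frag_id */
}
```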
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 831495529b82..ace29b60813c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
| 200 | sk->sk_v6_daddr = usin->sin6_addr; | 200 | sk->sk_v6_daddr = usin->sin6_addr; |
| 201 | np->flow_label = fl6.flowlabel; | 201 | np->flow_label = fl6.flowlabel; |
| 202 | 202 | ||
| 203 | ip6_set_txhash(sk); | ||
| 204 | |||
| 205 | /* | 203 | /* |
| 206 | * TCP over IPv4 | 204 | * TCP over IPv4 |
| 207 | */ | 205 | */ |
| @@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
| 297 | if (err) | 295 | if (err) |
| 298 | goto late_failure; | 296 | goto late_failure; |
| 299 | 297 | ||
| 298 | ip6_set_txhash(sk); | ||
| 299 | |||
| 300 | if (!tp->write_seq && likely(!tp->repair)) | 300 | if (!tp->write_seq && likely(!tp->repair)) |
| 301 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, | 301 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, |
| 302 | sk->sk_v6_daddr.s6_addr32, | 302 | sk->sk_v6_daddr.s6_addr32, |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ac49f84fe2c3..5f983644373a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
| @@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
| 170 | case IPPROTO_DCCP: | 170 | case IPPROTO_DCCP: |
| 171 | if (!onlyproto && (nh + offset + 4 < skb->data || | 171 | if (!onlyproto && (nh + offset + 4 < skb->data || |
| 172 | pskb_may_pull(skb, nh + offset + 4 - skb->data))) { | 172 | pskb_may_pull(skb, nh + offset + 4 - skb->data))) { |
| 173 | __be16 *ports = (__be16 *)exthdr; | 173 | __be16 *ports; |
| 174 | 174 | ||
| 175 | nh = skb_network_header(skb); | ||
| 176 | ports = (__be16 *)(nh + offset); | ||
| 175 | fl6->fl6_sport = ports[!!reverse]; | 177 | fl6->fl6_sport = ports[!!reverse]; |
| 176 | fl6->fl6_dport = ports[!reverse]; | 178 | fl6->fl6_dport = ports[!reverse]; |
| 177 | } | 179 | } |
| @@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
| 180 | 182 | ||
| 181 | case IPPROTO_ICMPV6: | 183 | case IPPROTO_ICMPV6: |
| 182 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { | 184 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { |
| 183 | u8 *icmp = (u8 *)exthdr; | 185 | u8 *icmp; |
| 184 | 186 | ||
| 187 | nh = skb_network_header(skb); | ||
| 188 | icmp = (u8 *)(nh + offset); | ||
| 185 | fl6->fl6_icmp_type = icmp[0]; | 189 | fl6->fl6_icmp_type = icmp[0]; |
| 186 | fl6->fl6_icmp_code = icmp[1]; | 190 | fl6->fl6_icmp_code = icmp[1]; |
| 187 | } | 191 | } |
| @@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
| 192 | case IPPROTO_MH: | 196 | case IPPROTO_MH: |
| 193 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { | 197 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { |
| 194 | struct ip6_mh *mh; | 198 | struct ip6_mh *mh; |
| 195 | mh = (struct ip6_mh *)exthdr; | ||
| 196 | 199 | ||
| 200 | nh = skb_network_header(skb); | ||
| 201 | mh = (struct ip6_mh *)(nh + offset); | ||
| 197 | fl6->fl6_mh_type = mh->ip6mh_type; | 202 | fl6->fl6_mh_type = mh->ip6mh_type; |
| 198 | } | 203 | } |
| 199 | fl6->flowi6_proto = nexthdr; | 204 | fl6->flowi6_proto = nexthdr; |
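The three _decode_session6() hunks all fix the same pattern: pskb_may_pull() may reallocate the skb head, so a pointer computed from exthdr before the call can be left dangling; the port, ICMPv6 and MH fields are now read through a pointer re-derived from skb_network_header() after the pull. A generic sketch of the safe pattern (the helper name is illustrative):

```c
/* Re-derive any header pointer after pskb_may_pull(), since the pull
 * may have moved skb->head and invalidated earlier pointers.
 */
static void read_ports_after_pull_sketch(struct sk_buff *skb, unsigned int offset,
					 __be16 *sport, __be16 *dport)
{
	u8 *nh = skb_network_header(skb);

	if (pskb_may_pull(skb, nh + offset + 4 - skb->data)) {
		const __be16 *ports;

		nh = skb_network_header(skb);	/* refresh: head may have moved */
		ports = (const __be16 *)(nh + offset);
		*sport = ports[0];
		*dport = ports[1];
	}
}
```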
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 92fafd485deb..3f3a6cbdceb7 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -1064,8 +1064,6 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
| 1064 | 1064 | ||
| 1065 | if (sk->sk_state != TCP_ESTABLISHED) { | 1065 | if (sk->sk_state != TCP_ESTABLISHED) { |
| 1066 | sock->state = SS_UNCONNECTED; | 1066 | sock->state = SS_UNCONNECTED; |
| 1067 | if (sk->sk_prot->disconnect(sk, flags)) | ||
| 1068 | sock->state = SS_DISCONNECTING; | ||
| 1069 | err = sock_error(sk); | 1067 | err = sock_error(sk); |
| 1070 | if (!err) | 1068 | if (!err) |
| 1071 | err = -ECONNRESET; | 1069 | err = -ECONNRESET; |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb6a1502b6df..343da1e35025 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -3458,7 +3458,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy, | |||
| 3458 | rcu_read_lock(); | 3458 | rcu_read_lock(); |
| 3459 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); | 3459 | chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); |
| 3460 | if (chanctx_conf) { | 3460 | if (chanctx_conf) { |
| 3461 | *chandef = chanctx_conf->def; | 3461 | *chandef = sdata->vif.bss_conf.chandef; |
| 3462 | ret = 0; | 3462 | ret = 0; |
| 3463 | } else if (local->open_count > 0 && | 3463 | } else if (local->open_count > 0 && |
| 3464 | local->open_count == local->monitors && | 3464 | local->open_count == local->monitors && |
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 8fdadfd94ba8..6081329784dd 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c | |||
| @@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif, | |||
| 448 | */ | 448 | */ |
| 449 | if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) { | 449 | if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) { |
| 450 | u32 basic_rates = vif->bss_conf.basic_rates; | 450 | u32 basic_rates = vif->bss_conf.basic_rates; |
| 451 | s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0; | 451 | s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0; |
| 452 | 452 | ||
| 453 | rate = &sband->bitrates[rates[0].idx]; | 453 | rate = &sband->bitrates[rates[0].idx]; |
| 454 | 454 | ||
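The rate.c one-liner fixes a misplaced parenthesis: ffs() returns the 1-based position of the lowest set bit, so the index of the lowest configured basic rate is ffs(basic_rates) - 1, whereas the old ffs(basic_rates - 1) operated on a different bitmask entirely. A quick userspace illustration of the difference:

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int basic_rates = 0x14;	/* bits 2 and 4 set */

	printf("old: %d\n", ffs(basic_rates - 1));	/* ffs(0x13) = 1 -> wrong index */
	printf("new: %d\n", ffs(basic_rates) - 1);	/* ffs(0x14) - 1 = 2 -> lowest set bit */
	return 0;
}
```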
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index edde723f9f00..2acab1bcaa4b 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c | |||
| @@ -62,14 +62,14 @@ minstrel_stats_open(struct inode *inode, struct file *file) | |||
| 62 | unsigned int i, tp, prob, eprob; | 62 | unsigned int i, tp, prob, eprob; |
| 63 | char *p; | 63 | char *p; |
| 64 | 64 | ||
| 65 | ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL); | 65 | ms = kmalloc(2048, GFP_KERNEL); |
| 66 | if (!ms) | 66 | if (!ms) |
| 67 | return -ENOMEM; | 67 | return -ENOMEM; |
| 68 | 68 | ||
| 69 | file->private_data = ms; | 69 | file->private_data = ms; |
| 70 | p = ms->buf; | 70 | p = ms->buf; |
| 71 | p += sprintf(p, "rate throughput ewma prob this prob " | 71 | p += sprintf(p, "rate tpt eprob *prob" |
| 72 | "this succ/attempt success attempts\n"); | 72 | " *ok(*cum) ok( cum)\n"); |
| 73 | for (i = 0; i < mi->n_rates; i++) { | 73 | for (i = 0; i < mi->n_rates; i++) { |
| 74 | struct minstrel_rate *mr = &mi->r[i]; | 74 | struct minstrel_rate *mr = &mi->r[i]; |
| 75 | struct minstrel_rate_stats *mrs = &mi->r[i].stats; | 75 | struct minstrel_rate_stats *mrs = &mi->r[i].stats; |
| @@ -86,8 +86,8 @@ minstrel_stats_open(struct inode *inode, struct file *file) | |||
| 86 | prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); | 86 | prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); |
| 87 | eprob = MINSTREL_TRUNC(mrs->probability * 1000); | 87 | eprob = MINSTREL_TRUNC(mrs->probability * 1000); |
| 88 | 88 | ||
| 89 | p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " | 89 | p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u" |
| 90 | " %3u(%3u) %8llu %8llu\n", | 90 | " %4u(%4u) %9llu(%9llu)\n", |
| 91 | tp / 10, tp % 10, | 91 | tp / 10, tp % 10, |
| 92 | eprob / 10, eprob % 10, | 92 | eprob / 10, eprob % 10, |
| 93 | prob / 10, prob % 10, | 93 | prob / 10, prob % 10, |
| @@ -102,6 +102,8 @@ minstrel_stats_open(struct inode *inode, struct file *file) | |||
| 102 | mi->sample_packets); | 102 | mi->sample_packets); |
| 103 | ms->len = p - ms->buf; | 103 | ms->len = p - ms->buf; |
| 104 | 104 | ||
| 105 | WARN_ON(ms->len + sizeof(*ms) > 2048); | ||
| 106 | |||
| 105 | return 0; | 107 | return 0; |
| 106 | } | 108 | } |
| 107 | 109 | ||
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c index a72ad46f2a04..d537bec93754 100644 --- a/net/mac80211/rc80211_minstrel_ht_debugfs.c +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c | |||
| @@ -63,8 +63,8 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p) | |||
| 63 | prob = MINSTREL_TRUNC(mr->cur_prob * 1000); | 63 | prob = MINSTREL_TRUNC(mr->cur_prob * 1000); |
| 64 | eprob = MINSTREL_TRUNC(mr->probability * 1000); | 64 | eprob = MINSTREL_TRUNC(mr->probability * 1000); |
| 65 | 65 | ||
| 66 | p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " | 66 | p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u " |
| 67 | "%3u %3u(%3u) %8llu %8llu\n", | 67 | "%3u %4u(%4u) %9llu(%9llu)\n", |
| 68 | tp / 10, tp % 10, | 68 | tp / 10, tp % 10, |
| 69 | eprob / 10, eprob % 10, | 69 | eprob / 10, eprob % 10, |
| 70 | prob / 10, prob % 10, | 70 | prob / 10, prob % 10, |
| @@ -96,14 +96,15 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) | |||
| 96 | return ret; | 96 | return ret; |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL); | 99 | ms = kmalloc(8192, GFP_KERNEL); |
| 100 | if (!ms) | 100 | if (!ms) |
| 101 | return -ENOMEM; | 101 | return -ENOMEM; |
| 102 | 102 | ||
| 103 | file->private_data = ms; | 103 | file->private_data = ms; |
| 104 | p = ms->buf; | 104 | p = ms->buf; |
| 105 | p += sprintf(p, "type rate throughput ewma prob " | 105 | p += sprintf(p, "type rate tpt eprob *prob " |
| 106 | "this prob retry this succ/attempt success attempts\n"); | 106 | "ret *ok(*cum) ok( cum)\n"); |
| 107 | |||
| 107 | 108 | ||
| 108 | p = minstrel_ht_stats_dump(mi, max_mcs, p); | 109 | p = minstrel_ht_stats_dump(mi, max_mcs, p); |
| 109 | for (i = 0; i < max_mcs; i++) | 110 | for (i = 0; i < max_mcs; i++) |
| @@ -118,6 +119,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file) | |||
| 118 | MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); | 119 | MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); |
| 119 | ms->len = p - ms->buf; | 120 | ms->len = p - ms->buf; |
| 120 | 121 | ||
| 122 | WARN_ON(ms->len + sizeof(*ms) > 8192); | ||
| 123 | |||
| 121 | return nonseekable_open(inode, file); | 124 | return nonseekable_open(inode, file); |
| 122 | } | 125 | } |
| 123 | 126 | ||
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 42f68cb8957e..bcda2ac7d844 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
| @@ -336,6 +336,7 @@ struct ieee80211_tx_latency_stat { | |||
| 336 | * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for | 336 | * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for |
| 337 | * AP only. | 337 | * AP only. |
| 338 | * @cipher_scheme: optional cipher scheme for this station | 338 | * @cipher_scheme: optional cipher scheme for this station |
| 339 | * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed | ||
| 339 | */ | 340 | */ |
| 340 | struct sta_info { | 341 | struct sta_info { |
| 341 | /* General information, mostly static */ | 342 | /* General information, mostly static */ |
diff --git a/net/mpls/Makefile b/net/mpls/Makefile index 0a3c171be537..6dec088c2d0f 100644 --- a/net/mpls/Makefile +++ b/net/mpls/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for MPLS. | 2 | # Makefile for MPLS. |
| 3 | # | 3 | # |
| 4 | obj-y += mpls_gso.o | 4 | obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o |
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index e28ed2ef5b06..e3545f21a099 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c | |||
| @@ -48,7 +48,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, | |||
| 48 | __skb_push(skb, skb->mac_len); | 48 | __skb_push(skb, skb->mac_len); |
| 49 | 49 | ||
| 50 | /* Segment inner packet. */ | 50 | /* Segment inner packet. */ |
| 51 | mpls_features = skb->dev->mpls_features & netif_skb_features(skb); | 51 | mpls_features = skb->dev->mpls_features & features; |
| 52 | segs = skb_mac_gso_segment(skb, mpls_features); | 52 | segs = skb_mac_gso_segment(skb, mpls_features); |
| 53 | 53 | ||
| 54 | 54 | ||
| @@ -59,8 +59,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, | |||
| 59 | * above pulled. It will be re-pushed after returning | 59 | * above pulled. It will be re-pushed after returning |
| 60 | * skb_mac_gso_segment(), an indirect caller of this function. | 60 | * skb_mac_gso_segment(), an indirect caller of this function. |
| 61 | */ | 61 | */ |
| 62 | __skb_push(skb, skb->data - skb_mac_header(skb)); | 62 | __skb_pull(skb, skb->data - skb_mac_header(skb)); |
| 63 | |||
| 64 | out: | 63 | out: |
| 65 | return segs; | 64 | return segs; |
| 66 | } | 65 | } |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 912e5a05b79d..86f9d76b1464 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -659,7 +659,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) | |||
| 659 | struct ip_set *set; | 659 | struct ip_set *set; |
| 660 | struct ip_set_net *inst = ip_set_pernet(net); | 660 | struct ip_set_net *inst = ip_set_pernet(net); |
| 661 | 661 | ||
| 662 | if (index > inst->ip_set_max) | 662 | if (index >= inst->ip_set_max) |
| 663 | return IPSET_INVALID_ID; | 663 | return IPSET_INVALID_ID; |
| 664 | 664 | ||
| 665 | nfnl_lock(NFNL_SUBSYS_IPSET); | 665 | nfnl_lock(NFNL_SUBSYS_IPSET); |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 91f17c1eb8a2..437a3663ad03 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
| @@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, | |||
| 316 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, | 316 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, |
| 317 | local))) { | 317 | local))) { |
| 318 | IP_VS_DBG_RL("We are crossing local and non-local addresses" | 318 | IP_VS_DBG_RL("We are crossing local and non-local addresses" |
| 319 | " daddr=%pI4\n", &dest->addr.ip); | 319 | " daddr=%pI4\n", &daddr); |
| 320 | goto err_put; | 320 | goto err_put; |
| 321 | } | 321 | } |
| 322 | 322 | ||
| @@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, | |||
| 458 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, | 458 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, |
| 459 | local))) { | 459 | local))) { |
| 460 | IP_VS_DBG_RL("We are crossing local and non-local addresses" | 460 | IP_VS_DBG_RL("We are crossing local and non-local addresses" |
| 461 | " daddr=%pI6\n", &dest->addr.in6); | 461 | " daddr=%pI6\n", daddr); |
| 462 | goto err_put; | 462 | goto err_put; |
| 463 | } | 463 | } |
| 464 | 464 | ||
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 44d1ea32570a..d87b6423ffb2 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
| @@ -213,7 +213,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
| 213 | { | 213 | { |
| 214 | /* REPLY */ | 214 | /* REPLY */ |
| 215 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ | 215 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ |
| 216 | /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 }, | 216 | /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 }, |
| 217 | /* | 217 | /* |
| 218 | * sNO -> sIV Never reached. | 218 | * sNO -> sIV Never reached. |
| 219 | * sSS -> sS2 Simultaneous open | 219 | * sSS -> sS2 Simultaneous open |
| @@ -223,7 +223,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
| 223 | * sFW -> sIV | 223 | * sFW -> sIV |
| 224 | * sCW -> sIV | 224 | * sCW -> sIV |
| 225 | * sLA -> sIV | 225 | * sLA -> sIV |
| 226 | * sTW -> sIV Reopened connection, but server may not do it. | 226 | * sTW -> sSS Reopened connection, but server may have switched role |
| 227 | * sCL -> sIV | 227 | * sCL -> sIV |
| 228 | */ | 228 | */ |
| 229 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ | 229 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 556a0dfa4abc..11ab4b078f3b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -1328,10 +1328,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
| 1328 | basechain->stats = stats; | 1328 | basechain->stats = stats; |
| 1329 | } else { | 1329 | } else { |
| 1330 | stats = netdev_alloc_pcpu_stats(struct nft_stats); | 1330 | stats = netdev_alloc_pcpu_stats(struct nft_stats); |
| 1331 | if (IS_ERR(stats)) { | 1331 | if (stats == NULL) { |
| 1332 | module_put(type->owner); | 1332 | module_put(type->owner); |
| 1333 | kfree(basechain); | 1333 | kfree(basechain); |
| 1334 | return PTR_ERR(stats); | 1334 | return -ENOMEM; |
| 1335 | } | 1335 | } |
| 1336 | rcu_assign_pointer(basechain->stats, stats); | 1336 | rcu_assign_pointer(basechain->stats, stats); |
| 1337 | } | 1337 | } |
| @@ -3744,6 +3744,20 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { | |||
| 3744 | .abort = nf_tables_abort, | 3744 | .abort = nf_tables_abort, |
| 3745 | }; | 3745 | }; |
| 3746 | 3746 | ||
| 3747 | int nft_chain_validate_dependency(const struct nft_chain *chain, | ||
| 3748 | enum nft_chain_type type) | ||
| 3749 | { | ||
| 3750 | const struct nft_base_chain *basechain; | ||
| 3751 | |||
| 3752 | if (chain->flags & NFT_BASE_CHAIN) { | ||
| 3753 | basechain = nft_base_chain(chain); | ||
| 3754 | if (basechain->type->type != type) | ||
| 3755 | return -EOPNOTSUPP; | ||
| 3756 | } | ||
| 3757 | return 0; | ||
| 3758 | } | ||
| 3759 | EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); | ||
| 3760 | |||
| 3747 | /* | 3761 | /* |
| 3748 | * Loop detection - walk through the ruleset beginning at the destination chain | 3762 | * Loop detection - walk through the ruleset beginning at the destination chain |
| 3749 | * of a new jump until either the source chain is reached (loop) or all | 3763 | * of a new jump until either the source chain is reached (loop) or all |
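The new nft_chain_validate_dependency() helper lets an expression declare which base-chain type it requires; the nft_masq and nft_nat hunks further down wire it into both ->init() and ->validate(). A minimal sketch of an expression hook restricted to NAT chains, mirroring those users:

```c
/* Sketch mirroring nft_masq_validate()/nft_nat_validate() below: reject
 * the expression unless it sits in (or jumps from) a NAT base chain.
 */
static int nft_example_validate(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nft_data **data)
{
	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
}
```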
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index b1e3a0579416..5f1be5ba3559 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
| @@ -43,7 +43,8 @@ | |||
| 43 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE | 43 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE |
| 44 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ | 44 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ |
| 45 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ | 45 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ |
| 46 | #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ | 46 | /* max packet size is limited by 16-bit struct nfattr nfa_len field */ |
| 47 | #define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN) | ||
| 47 | 48 | ||
| 48 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ | 49 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ |
| 49 | printk(x, ## args); } while (0); | 50 | printk(x, ## args); } while (0); |
| @@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, | |||
| 252 | 253 | ||
| 253 | case NFULNL_COPY_PACKET: | 254 | case NFULNL_COPY_PACKET: |
| 254 | inst->copy_mode = mode; | 255 | inst->copy_mode = mode; |
| 256 | if (range == 0) | ||
| 257 | range = NFULNL_COPY_RANGE_MAX; | ||
| 255 | inst->copy_range = min_t(unsigned int, | 258 | inst->copy_range = min_t(unsigned int, |
| 256 | range, NFULNL_COPY_RANGE_MAX); | 259 | range, NFULNL_COPY_RANGE_MAX); |
| 257 | break; | 260 | break; |
| @@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size, | |||
| 343 | return skb; | 346 | return skb; |
| 344 | } | 347 | } |
| 345 | 348 | ||
| 346 | static int | 349 | static void |
| 347 | __nfulnl_send(struct nfulnl_instance *inst) | 350 | __nfulnl_send(struct nfulnl_instance *inst) |
| 348 | { | 351 | { |
| 349 | int status = -1; | ||
| 350 | |||
| 351 | if (inst->qlen > 1) { | 352 | if (inst->qlen > 1) { |
| 352 | struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, | 353 | struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, |
| 353 | NLMSG_DONE, | 354 | NLMSG_DONE, |
| 354 | sizeof(struct nfgenmsg), | 355 | sizeof(struct nfgenmsg), |
| 355 | 0); | 356 | 0); |
| 356 | if (!nlh) | 357 | if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", |
| 358 | inst->skb->len, skb_tailroom(inst->skb))) { | ||
| 359 | kfree_skb(inst->skb); | ||
| 357 | goto out; | 360 | goto out; |
| 361 | } | ||
| 358 | } | 362 | } |
| 359 | status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, | 363 | nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, |
| 360 | MSG_DONTWAIT); | 364 | MSG_DONTWAIT); |
| 361 | 365 | out: | |
| 362 | inst->qlen = 0; | 366 | inst->qlen = 0; |
| 363 | inst->skb = NULL; | 367 | inst->skb = NULL; |
| 364 | out: | ||
| 365 | return status; | ||
| 366 | } | 368 | } |
| 367 | 369 | ||
| 368 | static void | 370 | static void |
| @@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net, | |||
| 649 | + nla_total_size(sizeof(u_int32_t)) /* gid */ | 651 | + nla_total_size(sizeof(u_int32_t)) /* gid */ |
| 650 | + nla_total_size(plen) /* prefix */ | 652 | + nla_total_size(plen) /* prefix */ |
| 651 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) | 653 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) |
| 652 | + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); | 654 | + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)) |
| 655 | + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */ | ||
| 653 | 656 | ||
| 654 | if (in && skb_mac_header_was_set(skb)) { | 657 | if (in && skb_mac_header_was_set(skb)) { |
| 655 | size += nla_total_size(skb->dev->hard_header_len) | 658 | size += nla_total_size(skb->dev->hard_header_len) |
| @@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net, | |||
| 678 | break; | 681 | break; |
| 679 | 682 | ||
| 680 | case NFULNL_COPY_PACKET: | 683 | case NFULNL_COPY_PACKET: |
| 681 | if (inst->copy_range == 0 | 684 | if (inst->copy_range > skb->len) |
| 682 | || inst->copy_range > skb->len) | ||
| 683 | data_len = skb->len; | 685 | data_len = skb->len; |
| 684 | else | 686 | else |
| 685 | data_len = inst->copy_range; | 687 | data_len = inst->copy_range; |
| @@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net, | |||
| 692 | goto unlock_and_release; | 694 | goto unlock_and_release; |
| 693 | } | 695 | } |
| 694 | 696 | ||
| 695 | if (inst->skb && | 697 | if (inst->skb && size > skb_tailroom(inst->skb)) { |
| 696 | size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { | ||
| 697 | /* either the queue len is too high or we don't have | 698 | /* either the queue len is too high or we don't have |
| 698 | * enough room in the skb left. flush to userspace. */ | 699 | * enough room in the skb left. flush to userspace. */ |
| 699 | __nfulnl_flush(inst); | 700 | __nfulnl_flush(inst); |
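The nfnetlink_log changes tighten the copy-range accounting: the payload attribute's 16-bit nla_len also covers the attribute header, so the maximum copy range drops to 0xFFFF - NLA_HDRLEN, a requested range of 0 is normalised to that maximum in nfulnl_set_mode(), and the message sizing now reserves room for the trailing NLMSG_DONE up front instead of subtracting it at flush time. A small standalone illustration of the capping arithmetic (constants copied from the hunk, assuming NLA_HDRLEN is the usual 4 bytes):

```c
#include <stdio.h>
#include <stdint.h>

#define NLA_HDRLEN 4				/* aligned sizeof(struct nlattr), assumed */
#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN)

int main(void)
{
	uint32_t range = 0;			/* userspace asked for "unlimited" */

	if (range == 0)
		range = NFULNL_COPY_RANGE_MAX;	/* as in nfulnl_set_mode() */
	if (range > NFULNL_COPY_RANGE_MAX)
		range = NFULNL_COPY_RANGE_MAX;

	/* attribute header + data still fits the 16-bit nla_len field */
	printf("worst-case nla_len = %u\n", NLA_HDRLEN + range);	/* 65535 */
	return 0;
}
```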
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index a82077d9f59b..7c60ccd61a3e 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
| @@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | |||
| 665 | * returned by nf_queue. For instance, callers rely on -ECANCELED to | 665 | * returned by nf_queue. For instance, callers rely on -ECANCELED to |
| 666 | * mean 'ignore this hook'. | 666 | * mean 'ignore this hook'. |
| 667 | */ | 667 | */ |
| 668 | if (IS_ERR(segs)) | 668 | if (IS_ERR_OR_NULL(segs)) |
| 669 | goto out_err; | 669 | goto out_err; |
| 670 | queued = 0; | 670 | queued = 0; |
| 671 | err = 0; | 671 | err = 0; |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7e2683c8a44a..9d6d6f60a80f 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
| @@ -19,9 +19,52 @@ | |||
| 19 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
| 20 | #include <linux/netfilter_ipv4/ip_tables.h> | 20 | #include <linux/netfilter_ipv4/ip_tables.h> |
| 21 | #include <linux/netfilter_ipv6/ip6_tables.h> | 21 | #include <linux/netfilter_ipv6/ip6_tables.h> |
| 22 | #include <asm/uaccess.h> /* for set_fs */ | ||
| 23 | #include <net/netfilter/nf_tables.h> | 22 | #include <net/netfilter/nf_tables.h> |
| 24 | 23 | ||
| 24 | static const struct { | ||
| 25 | const char *name; | ||
| 26 | u8 type; | ||
| 27 | } table_to_chaintype[] = { | ||
| 28 | { "filter", NFT_CHAIN_T_DEFAULT }, | ||
| 29 | { "raw", NFT_CHAIN_T_DEFAULT }, | ||
| 30 | { "security", NFT_CHAIN_T_DEFAULT }, | ||
| 31 | { "mangle", NFT_CHAIN_T_ROUTE }, | ||
| 32 | { "nat", NFT_CHAIN_T_NAT }, | ||
| 33 | { }, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static int nft_compat_table_to_chaintype(const char *table) | ||
| 37 | { | ||
| 38 | int i; | ||
| 39 | |||
| 40 | for (i = 0; table_to_chaintype[i].name != NULL; i++) { | ||
| 41 | if (strcmp(table_to_chaintype[i].name, table) == 0) | ||
| 42 | return table_to_chaintype[i].type; | ||
| 43 | } | ||
| 44 | |||
| 45 | return -1; | ||
| 46 | } | ||
| 47 | |||
| 48 | static int nft_compat_chain_validate_dependency(const char *tablename, | ||
| 49 | const struct nft_chain *chain) | ||
| 50 | { | ||
| 51 | enum nft_chain_type type; | ||
| 52 | const struct nft_base_chain *basechain; | ||
| 53 | |||
| 54 | if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) | ||
| 55 | return 0; | ||
| 56 | |||
| 57 | type = nft_compat_table_to_chaintype(tablename); | ||
| 58 | if (type < 0) | ||
| 59 | return -EINVAL; | ||
| 60 | |||
| 61 | basechain = nft_base_chain(chain); | ||
| 62 | if (basechain->type->type != type) | ||
| 63 | return -EINVAL; | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | |||
| 25 | union nft_entry { | 68 | union nft_entry { |
| 26 | struct ipt_entry e4; | 69 | struct ipt_entry e4; |
| 27 | struct ip6t_entry e6; | 70 | struct ip6t_entry e6; |
| @@ -95,6 +138,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
| 95 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 138 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
| 96 | 139 | ||
| 97 | par->hook_mask = 1 << ops->hooknum; | 140 | par->hook_mask = 1 << ops->hooknum; |
| 141 | } else { | ||
| 142 | par->hook_mask = 0; | ||
| 98 | } | 143 | } |
| 99 | par->family = ctx->afi->family; | 144 | par->family = ctx->afi->family; |
| 100 | } | 145 | } |
| @@ -151,6 +196,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 151 | union nft_entry e = {}; | 196 | union nft_entry e = {}; |
| 152 | int ret; | 197 | int ret; |
| 153 | 198 | ||
| 199 | ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); | ||
| 200 | if (ret < 0) | ||
| 201 | goto err; | ||
| 202 | |||
| 154 | target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); | 203 | target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); |
| 155 | 204 | ||
| 156 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 205 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
| @@ -216,6 +265,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, | |||
| 216 | { | 265 | { |
| 217 | struct xt_target *target = expr->ops->data; | 266 | struct xt_target *target = expr->ops->data; |
| 218 | unsigned int hook_mask = 0; | 267 | unsigned int hook_mask = 0; |
| 268 | int ret; | ||
| 219 | 269 | ||
| 220 | if (ctx->chain->flags & NFT_BASE_CHAIN) { | 270 | if (ctx->chain->flags & NFT_BASE_CHAIN) { |
| 221 | const struct nft_base_chain *basechain = | 271 | const struct nft_base_chain *basechain = |
| @@ -223,11 +273,13 @@ static int nft_target_validate(const struct nft_ctx *ctx, | |||
| 223 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 273 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
| 224 | 274 | ||
| 225 | hook_mask = 1 << ops->hooknum; | 275 | hook_mask = 1 << ops->hooknum; |
| 226 | if (hook_mask & target->hooks) | 276 | if (!(hook_mask & target->hooks)) |
| 227 | return 0; | 277 | return -EINVAL; |
| 228 | 278 | ||
| 229 | /* This target is being called from an invalid chain */ | 279 | ret = nft_compat_chain_validate_dependency(target->table, |
| 230 | return -EINVAL; | 280 | ctx->chain); |
| 281 | if (ret < 0) | ||
| 282 | return ret; | ||
| 231 | } | 283 | } |
| 232 | return 0; | 284 | return 0; |
| 233 | } | 285 | } |
| @@ -293,6 +345,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
| 293 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 345 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
| 294 | 346 | ||
| 295 | par->hook_mask = 1 << ops->hooknum; | 347 | par->hook_mask = 1 << ops->hooknum; |
| 348 | } else { | ||
| 349 | par->hook_mask = 0; | ||
| 296 | } | 350 | } |
| 297 | par->family = ctx->afi->family; | 351 | par->family = ctx->afi->family; |
| 298 | } | 352 | } |
| @@ -320,6 +374,10 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 320 | union nft_entry e = {}; | 374 | union nft_entry e = {}; |
| 321 | int ret; | 375 | int ret; |
| 322 | 376 | ||
| 377 | ret = nft_compat_chain_validate_dependency(match->name, ctx->chain); | ||
| 378 | if (ret < 0) | ||
| 379 | goto err; | ||
| 380 | |||
| 323 | match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); | 381 | match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); |
| 324 | 382 | ||
| 325 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 383 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
| @@ -379,6 +437,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
| 379 | { | 437 | { |
| 380 | struct xt_match *match = expr->ops->data; | 438 | struct xt_match *match = expr->ops->data; |
| 381 | unsigned int hook_mask = 0; | 439 | unsigned int hook_mask = 0; |
| 440 | int ret; | ||
| 382 | 441 | ||
| 383 | if (ctx->chain->flags & NFT_BASE_CHAIN) { | 442 | if (ctx->chain->flags & NFT_BASE_CHAIN) { |
| 384 | const struct nft_base_chain *basechain = | 443 | const struct nft_base_chain *basechain = |
| @@ -386,11 +445,13 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
| 386 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 445 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
| 387 | 446 | ||
| 388 | hook_mask = 1 << ops->hooknum; | 447 | hook_mask = 1 << ops->hooknum; |
| 389 | if (hook_mask & match->hooks) | 448 | if (!(hook_mask & match->hooks)) |
| 390 | return 0; | 449 | return -EINVAL; |
| 391 | 450 | ||
| 392 | /* This match is being called from an invalid chain */ | 451 | ret = nft_compat_chain_validate_dependency(match->name, |
| 393 | return -EINVAL; | 452 | ctx->chain); |
| 453 | if (ret < 0) | ||
| 454 | return ret; | ||
| 394 | } | 455 | } |
| 395 | return 0; | 456 | return 0; |
| 396 | } | 457 | } |
| @@ -611,7 +672,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
| 611 | family = ctx->afi->family; | 672 | family = ctx->afi->family; |
| 612 | 673 | ||
| 613 | /* Re-use the existing target if it's already loaded. */ | 674 | /* Re-use the existing target if it's already loaded. */ |
| 614 | list_for_each_entry(nft_target, &nft_match_list, head) { | 675 | list_for_each_entry(nft_target, &nft_target_list, head) { |
| 615 | struct xt_target *target = nft_target->ops.data; | 676 | struct xt_target *target = nft_target->ops.data; |
| 616 | 677 | ||
| 617 | if (strcmp(target->name, tg_name) == 0 && | 678 | if (strcmp(target->name, tg_name) == 0 && |
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 6637bab00567..d1ffd5eb3a9b 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c | |||
| @@ -26,6 +26,11 @@ int nft_masq_init(const struct nft_ctx *ctx, | |||
| 26 | const struct nlattr * const tb[]) | 26 | const struct nlattr * const tb[]) |
| 27 | { | 27 | { |
| 28 | struct nft_masq *priv = nft_expr_priv(expr); | 28 | struct nft_masq *priv = nft_expr_priv(expr); |
| 29 | int err; | ||
| 30 | |||
| 31 | err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); | ||
| 32 | if (err < 0) | ||
| 33 | return err; | ||
| 29 | 34 | ||
| 30 | if (tb[NFTA_MASQ_FLAGS] == NULL) | 35 | if (tb[NFTA_MASQ_FLAGS] == NULL) |
| 31 | return 0; | 36 | return 0; |
| @@ -55,5 +60,12 @@ nla_put_failure: | |||
| 55 | } | 60 | } |
| 56 | EXPORT_SYMBOL_GPL(nft_masq_dump); | 61 | EXPORT_SYMBOL_GPL(nft_masq_dump); |
| 57 | 62 | ||
| 63 | int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
| 64 | const struct nft_data **data) | ||
| 65 | { | ||
| 66 | return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); | ||
| 67 | } | ||
| 68 | EXPORT_SYMBOL_GPL(nft_masq_validate); | ||
| 69 | |||
| 58 | MODULE_LICENSE("GPL"); | 70 | MODULE_LICENSE("GPL"); |
| 59 | MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); | 71 | MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); |
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 799550b476fb..afe2b0b45ec4 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c | |||
| @@ -95,7 +95,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 95 | u32 family; | 95 | u32 family; |
| 96 | int err; | 96 | int err; |
| 97 | 97 | ||
| 98 | if (tb[NFTA_NAT_TYPE] == NULL) | 98 | err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); |
| 99 | if (err < 0) | ||
| 100 | return err; | ||
| 101 | |||
| 102 | if (tb[NFTA_NAT_TYPE] == NULL || | ||
| 103 | (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && | ||
| 104 | tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) | ||
| 99 | return -EINVAL; | 105 | return -EINVAL; |
| 100 | 106 | ||
| 101 | switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { | 107 | switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { |
| @@ -120,38 +126,44 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
| 120 | priv->family = family; | 126 | priv->family = family; |
| 121 | 127 | ||
| 122 | if (tb[NFTA_NAT_REG_ADDR_MIN]) { | 128 | if (tb[NFTA_NAT_REG_ADDR_MIN]) { |
| 123 | priv->sreg_addr_min = ntohl(nla_get_be32( | 129 | priv->sreg_addr_min = |
| 124 | tb[NFTA_NAT_REG_ADDR_MIN])); | 130 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN])); |
| 131 | |||
| 125 | err = nft_validate_input_register(priv->sreg_addr_min); | 132 | err = nft_validate_input_register(priv->sreg_addr_min); |
| 126 | if (err < 0) | 133 | if (err < 0) |
| 127 | return err; | 134 | return err; |
| 128 | } | ||
| 129 | 135 | ||
| 130 | if (tb[NFTA_NAT_REG_ADDR_MAX]) { | 136 | if (tb[NFTA_NAT_REG_ADDR_MAX]) { |
| 131 | priv->sreg_addr_max = ntohl(nla_get_be32( | 137 | priv->sreg_addr_max = |
| 132 | tb[NFTA_NAT_REG_ADDR_MAX])); | 138 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX])); |
| 133 | err = nft_validate_input_register(priv->sreg_addr_max); | 139 | |
| 134 | if (err < 0) | 140 | err = nft_validate_input_register(priv->sreg_addr_max); |
| 135 | return err; | 141 | if (err < 0) |
| 136 | } else | 142 | return err; |
| 137 | priv->sreg_addr_max = priv->sreg_addr_min; | 143 | } else { |
| 144 | priv->sreg_addr_max = priv->sreg_addr_min; | ||
| 145 | } | ||
| 146 | } | ||
| 138 | 147 | ||
| 139 | if (tb[NFTA_NAT_REG_PROTO_MIN]) { | 148 | if (tb[NFTA_NAT_REG_PROTO_MIN]) { |
| 140 | priv->sreg_proto_min = ntohl(nla_get_be32( | 149 | priv->sreg_proto_min = |
| 141 | tb[NFTA_NAT_REG_PROTO_MIN])); | 150 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN])); |
| 151 | |||
| 142 | err = nft_validate_input_register(priv->sreg_proto_min); | 152 | err = nft_validate_input_register(priv->sreg_proto_min); |
| 143 | if (err < 0) | 153 | if (err < 0) |
| 144 | return err; | 154 | return err; |
| 145 | } | ||
| 146 | 155 | ||
| 147 | if (tb[NFTA_NAT_REG_PROTO_MAX]) { | 156 | if (tb[NFTA_NAT_REG_PROTO_MAX]) { |
| 148 | priv->sreg_proto_max = ntohl(nla_get_be32( | 157 | priv->sreg_proto_max = |
| 149 | tb[NFTA_NAT_REG_PROTO_MAX])); | 158 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX])); |
| 150 | err = nft_validate_input_register(priv->sreg_proto_max); | 159 | |
| 151 | if (err < 0) | 160 | err = nft_validate_input_register(priv->sreg_proto_max); |
| 152 | return err; | 161 | if (err < 0) |
| 153 | } else | 162 | return err; |
| 154 | priv->sreg_proto_max = priv->sreg_proto_min; | 163 | } else { |
| 164 | priv->sreg_proto_max = priv->sreg_proto_min; | ||
| 165 | } | ||
| 166 | } | ||
| 155 | 167 | ||
| 156 | if (tb[NFTA_NAT_FLAGS]) { | 168 | if (tb[NFTA_NAT_FLAGS]) { |
| 157 | priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); | 169 | priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); |
| @@ -179,17 +191,19 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
| 179 | 191 | ||
| 180 | if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) | 192 | if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) |
| 181 | goto nla_put_failure; | 193 | goto nla_put_failure; |
| 182 | if (nla_put_be32(skb, | 194 | |
| 183 | NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min))) | 195 | if (priv->sreg_addr_min) { |
| 184 | goto nla_put_failure; | 196 | if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN, |
| 185 | if (nla_put_be32(skb, | 197 | htonl(priv->sreg_addr_min)) || |
| 186 | NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) | 198 | nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX, |
| 187 | goto nla_put_failure; | 199 | htonl(priv->sreg_addr_max))) |
| 200 | goto nla_put_failure; | ||
| 201 | } | ||
| 202 | |||
| 188 | if (priv->sreg_proto_min) { | 203 | if (priv->sreg_proto_min) { |
| 189 | if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, | 204 | if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, |
| 190 | htonl(priv->sreg_proto_min))) | 205 | htonl(priv->sreg_proto_min)) || |
| 191 | goto nla_put_failure; | 206 | nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, |
| 192 | if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, | ||
| 193 | htonl(priv->sreg_proto_max))) | 207 | htonl(priv->sreg_proto_max))) |
| 194 | goto nla_put_failure; | 208 | goto nla_put_failure; |
| 195 | } | 209 | } |
| @@ -205,6 +219,13 @@ nla_put_failure: | |||
| 205 | return -1; | 219 | return -1; |
| 206 | } | 220 | } |
| 207 | 221 | ||
| 222 | static int nft_nat_validate(const struct nft_ctx *ctx, | ||
| 223 | const struct nft_expr *expr, | ||
| 224 | const struct nft_data **data) | ||
| 225 | { | ||
| 226 | return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); | ||
| 227 | } | ||
| 228 | |||
| 208 | static struct nft_expr_type nft_nat_type; | 229 | static struct nft_expr_type nft_nat_type; |
| 209 | static const struct nft_expr_ops nft_nat_ops = { | 230 | static const struct nft_expr_ops nft_nat_ops = { |
| 210 | .type = &nft_nat_type, | 231 | .type = &nft_nat_type, |
| @@ -212,6 +233,7 @@ static const struct nft_expr_ops nft_nat_ops = { | |||
| 212 | .eval = nft_nat_eval, | 233 | .eval = nft_nat_eval, |
| 213 | .init = nft_nat_init, | 234 | .init = nft_nat_init, |
| 214 | .dump = nft_nat_dump, | 235 | .dump = nft_nat_dump, |
| 236 | .validate = nft_nat_validate, | ||
| 215 | }; | 237 | }; |
| 216 | 238 | ||
| 217 | static struct nft_expr_type nft_nat_type __read_mostly = { | 239 | static struct nft_expr_type nft_nat_type __read_mostly = { |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7a186e74b1b3..f1de72de273e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); | |||
| 96 | static int netlink_dump(struct sock *sk); | 96 | static int netlink_dump(struct sock *sk); |
| 97 | static void netlink_skb_destructor(struct sk_buff *skb); | 97 | static void netlink_skb_destructor(struct sk_buff *skb); |
| 98 | 98 | ||
| 99 | /* nl_table locking explained: | ||
| 100 | * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock | ||
| 101 | * combined with an RCU read-side lock. Insertion and removal are protected | ||
| 102 | * with nl_sk_hash_lock while using RCU list modification primitives and may | ||
| 103 | * run in parallel to nl_table_lock protected lookups. Destruction of the | ||
| 104 | * Netlink socket may only occur *after* nl_table_lock has been acquired | ||
| 105 | * either during or after the socket has been removed from the list. | ||
| 106 | */ | ||
| 99 | DEFINE_RWLOCK(nl_table_lock); | 107 | DEFINE_RWLOCK(nl_table_lock); |
| 100 | EXPORT_SYMBOL_GPL(nl_table_lock); | 108 | EXPORT_SYMBOL_GPL(nl_table_lock); |
| 101 | static atomic_t nl_table_users = ATOMIC_INIT(0); | 109 | static atomic_t nl_table_users = ATOMIC_INIT(0); |
| @@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock); | |||
| 109 | static int lockdep_nl_sk_hash_is_held(void) | 117 | static int lockdep_nl_sk_hash_is_held(void) |
| 110 | { | 118 | { |
| 111 | #ifdef CONFIG_LOCKDEP | 119 | #ifdef CONFIG_LOCKDEP |
| 112 | return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1; | 120 | if (debug_locks) |
| 113 | #else | 121 | return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); |
| 114 | return 1; | ||
| 115 | #endif | 122 | #endif |
| 123 | return 1; | ||
| 116 | } | 124 | } |
| 117 | 125 | ||
| 118 | static ATOMIC_NOTIFIER_HEAD(netlink_chain); | 126 | static ATOMIC_NOTIFIER_HEAD(netlink_chain); |
| @@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) | |||
| 1028 | struct netlink_table *table = &nl_table[protocol]; | 1036 | struct netlink_table *table = &nl_table[protocol]; |
| 1029 | struct sock *sk; | 1037 | struct sock *sk; |
| 1030 | 1038 | ||
| 1039 | read_lock(&nl_table_lock); | ||
| 1031 | rcu_read_lock(); | 1040 | rcu_read_lock(); |
| 1032 | sk = __netlink_lookup(table, portid, net); | 1041 | sk = __netlink_lookup(table, portid, net); |
| 1033 | if (sk) | 1042 | if (sk) |
| 1034 | sock_hold(sk); | 1043 | sock_hold(sk); |
| 1035 | rcu_read_unlock(); | 1044 | rcu_read_unlock(); |
| 1045 | read_unlock(&nl_table_lock); | ||
| 1036 | 1046 | ||
| 1037 | return sk; | 1047 | return sk; |
| 1038 | } | 1048 | } |
| @@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock) | |||
| 1257 | } | 1267 | } |
| 1258 | netlink_table_ungrab(); | 1268 | netlink_table_ungrab(); |
| 1259 | 1269 | ||
| 1260 | /* Wait for readers to complete */ | ||
| 1261 | synchronize_net(); | ||
| 1262 | |||
| 1263 | kfree(nlk->groups); | 1270 | kfree(nlk->groups); |
| 1264 | nlk->groups = NULL; | 1271 | nlk->groups = NULL; |
| 1265 | 1272 | ||
| @@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock) | |||
| 1281 | 1288 | ||
| 1282 | retry: | 1289 | retry: |
| 1283 | cond_resched(); | 1290 | cond_resched(); |
| 1291 | netlink_table_grab(); | ||
| 1284 | rcu_read_lock(); | 1292 | rcu_read_lock(); |
| 1285 | if (__netlink_lookup(table, portid, net)) { | 1293 | if (__netlink_lookup(table, portid, net)) { |
| 1286 | /* Bind collision, search negative portid values. */ | 1294 | /* Bind collision, search negative portid values. */ |
| @@ -1288,9 +1296,11 @@ retry: | |||
| 1288 | if (rover > -4097) | 1296 | if (rover > -4097) |
| 1289 | rover = -4097; | 1297 | rover = -4097; |
| 1290 | rcu_read_unlock(); | 1298 | rcu_read_unlock(); |
| 1299 | netlink_table_ungrab(); | ||
| 1291 | goto retry; | 1300 | goto retry; |
| 1292 | } | 1301 | } |
| 1293 | rcu_read_unlock(); | 1302 | rcu_read_unlock(); |
| 1303 | netlink_table_ungrab(); | ||
| 1294 | 1304 | ||
| 1295 | err = netlink_insert(sk, net, portid); | 1305 | err = netlink_insert(sk, net, portid); |
| 1296 | if (err == -EADDRINUSE) | 1306 | if (err == -EADDRINUSE) |
| @@ -2921,14 +2931,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) | |||
| 2921 | } | 2931 | } |
| 2922 | 2932 | ||
| 2923 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) | 2933 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) |
| 2924 | __acquires(RCU) | 2934 | __acquires(nl_table_lock) __acquires(RCU) |
| 2925 | { | 2935 | { |
| 2936 | read_lock(&nl_table_lock); | ||
| 2926 | rcu_read_lock(); | 2937 | rcu_read_lock(); |
| 2927 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2938 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
| 2928 | } | 2939 | } |
| 2929 | 2940 | ||
| 2930 | static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2941 | static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
| 2931 | { | 2942 | { |
| 2943 | struct rhashtable *ht; | ||
| 2932 | struct netlink_sock *nlk; | 2944 | struct netlink_sock *nlk; |
| 2933 | struct nl_seq_iter *iter; | 2945 | struct nl_seq_iter *iter; |
| 2934 | struct net *net; | 2946 | struct net *net; |
| @@ -2943,19 +2955,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 2943 | iter = seq->private; | 2955 | iter = seq->private; |
| 2944 | nlk = v; | 2956 | nlk = v; |
| 2945 | 2957 | ||
| 2946 | rht_for_each_entry_rcu(nlk, nlk->node.next, node) | 2958 | i = iter->link; |
| 2959 | ht = &nl_table[i].hash; | ||
| 2960 | rht_for_each_entry(nlk, nlk->node.next, ht, node) | ||
| 2947 | if (net_eq(sock_net((struct sock *)nlk), net)) | 2961 | if (net_eq(sock_net((struct sock *)nlk), net)) |
| 2948 | return nlk; | 2962 | return nlk; |
| 2949 | 2963 | ||
| 2950 | i = iter->link; | ||
| 2951 | j = iter->hash_idx + 1; | 2964 | j = iter->hash_idx + 1; |
| 2952 | 2965 | ||
| 2953 | do { | 2966 | do { |
| 2954 | struct rhashtable *ht = &nl_table[i].hash; | ||
| 2955 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | 2967 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); |
| 2956 | 2968 | ||
| 2957 | for (; j < tbl->size; j++) { | 2969 | for (; j < tbl->size; j++) { |
| 2958 | rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) { | 2970 | rht_for_each_entry(nlk, tbl->buckets[j], ht, node) { |
| 2959 | if (net_eq(sock_net((struct sock *)nlk), net)) { | 2971 | if (net_eq(sock_net((struct sock *)nlk), net)) { |
| 2960 | iter->link = i; | 2972 | iter->link = i; |
| 2961 | iter->hash_idx = j; | 2973 | iter->hash_idx = j; |
| @@ -2971,9 +2983,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
| 2971 | } | 2983 | } |
| 2972 | 2984 | ||
| 2973 | static void netlink_seq_stop(struct seq_file *seq, void *v) | 2985 | static void netlink_seq_stop(struct seq_file *seq, void *v) |
| 2974 | __releases(RCU) | 2986 | __releases(RCU) __releases(nl_table_lock) |
| 2975 | { | 2987 | { |
| 2976 | rcu_read_unlock(); | 2988 | rcu_read_unlock(); |
| 2989 | read_unlock(&nl_table_lock); | ||
| 2977 | } | 2990 | } |
| 2978 | 2991 | ||
| 2979 | 2992 | ||
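The af_netlink changes replace the "synchronize_net() before free" scheme with the rule spelled out in the new locking comment: lookups hold nl_table_lock on the read side together with the RCU read lock, insertion and removal take nl_sk_hash_lock, and the /proc seq-file iterator switches to the non-RCU rht_for_each_entry() walk under that read lock. The netlink_lookup() hunk shows the reader pattern; isolated here for reference (a sketch, not an additional kernel function):

```c
/* Reader-side sketch: the rwlock keeps the socket from being torn down
 * while RCU protects the hash traversal itself.
 */
static struct sock *netlink_lookup_sketch(struct netlink_table *table,
					  struct net *net, u32 portid)
{
	struct sock *sk;

	read_lock(&nl_table_lock);
	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);		/* pin before dropping the locks */
	rcu_read_unlock();
	read_unlock(&nl_table_lock);

	return sk;
}
```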
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2e31d9e7f4dc..e6d7255183eb 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -324,6 +324,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, | |||
| 324 | segs = __skb_gso_segment(skb, NETIF_F_SG, false); | 324 | segs = __skb_gso_segment(skb, NETIF_F_SG, false); |
| 325 | if (IS_ERR(segs)) | 325 | if (IS_ERR(segs)) |
| 326 | return PTR_ERR(segs); | 326 | return PTR_ERR(segs); |
| 327 | if (segs == NULL) | ||
| 328 | return -EINVAL; | ||
| 327 | 329 | ||
| 328 | /* Queue all of the segments. */ | 330 | /* Queue all of the segments. */ |
| 329 | skb = segs; | 331 | skb = segs; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2cf61b3e633c..76f402e05bd6 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -947,7 +947,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | |||
| 947 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { | 947 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { |
| 948 | if (qdisc_is_percpu_stats(sch)) { | 948 | if (qdisc_is_percpu_stats(sch)) { |
| 949 | sch->cpu_bstats = | 949 | sch->cpu_bstats = |
| 950 | alloc_percpu(struct gnet_stats_basic_cpu); | 950 | netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); |
| 951 | if (!sch->cpu_bstats) | 951 | if (!sch->cpu_bstats) |
| 952 | goto err_out4; | 952 | goto err_out4; |
| 953 | 953 | ||
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 33d7a98a7a97..b783a446d884 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c | |||
| @@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 445 | sch->limit = q->params.limit; | 445 | sch->limit = q->params.limit; |
| 446 | 446 | ||
| 447 | setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); | 447 | setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); |
| 448 | mod_timer(&q->adapt_timer, jiffies + HZ / 2); | ||
| 449 | 448 | ||
| 450 | if (opt) { | 449 | if (opt) { |
| 451 | int err = pie_change(sch, opt); | 450 | int err = pie_change(sch, opt); |
| @@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 454 | return err; | 453 | return err; |
| 455 | } | 454 | } |
| 456 | 455 | ||
| 456 | mod_timer(&q->adapt_timer, jiffies + HZ / 2); | ||
| 457 | return 0; | 457 | return 0; |
| 458 | } | 458 | } |
| 459 | 459 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 90cee4a6fce4..5781634e957d 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
| @@ -219,11 +219,11 @@ void tipc_node_abort_sock_conns(struct list_head *conns) | |||
| 219 | void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 219 | void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
| 220 | { | 220 | { |
| 221 | struct tipc_link **active = &n_ptr->active_links[0]; | 221 | struct tipc_link **active = &n_ptr->active_links[0]; |
| 222 | u32 addr = n_ptr->addr; | ||
| 223 | 222 | ||
| 224 | n_ptr->working_links++; | 223 | n_ptr->working_links++; |
| 225 | tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, | 224 | n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; |
| 226 | l_ptr->bearer_id, addr); | 225 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; |
| 226 | |||
| 227 | pr_info("Established link <%s> on network plane %c\n", | 227 | pr_info("Established link <%s> on network plane %c\n", |
| 228 | l_ptr->name, l_ptr->net_plane); | 228 | l_ptr->name, l_ptr->net_plane); |
| 229 | 229 | ||
| @@ -284,10 +284,10 @@ static void node_select_active_links(struct tipc_node *n_ptr) | |||
| 284 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 284 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
| 285 | { | 285 | { |
| 286 | struct tipc_link **active; | 286 | struct tipc_link **active; |
| 287 | u32 addr = n_ptr->addr; | ||
| 288 | 287 | ||
| 289 | n_ptr->working_links--; | 288 | n_ptr->working_links--; |
| 290 | tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr); | 289 | n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN; |
| 290 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; | ||
| 291 | 291 | ||
| 292 | if (!tipc_link_is_active(l_ptr)) { | 292 | if (!tipc_link_is_active(l_ptr)) { |
| 293 | pr_info("Lost standby link <%s> on network plane %c\n", | 293 | pr_info("Lost standby link <%s> on network plane %c\n", |
| @@ -552,28 +552,30 @@ void tipc_node_unlock(struct tipc_node *node) | |||
| 552 | LIST_HEAD(conn_sks); | 552 | LIST_HEAD(conn_sks); |
| 553 | struct sk_buff_head waiting_sks; | 553 | struct sk_buff_head waiting_sks; |
| 554 | u32 addr = 0; | 554 | u32 addr = 0; |
| 555 | unsigned int flags = node->action_flags; | 555 | int flags = node->action_flags; |
| 556 | u32 link_id = 0; | ||
| 556 | 557 | ||
| 557 | if (likely(!node->action_flags)) { | 558 | if (likely(!flags)) { |
| 558 | spin_unlock_bh(&node->lock); | 559 | spin_unlock_bh(&node->lock); |
| 559 | return; | 560 | return; |
| 560 | } | 561 | } |
| 561 | 562 | ||
| 563 | addr = node->addr; | ||
| 564 | link_id = node->link_id; | ||
| 562 | __skb_queue_head_init(&waiting_sks); | 565 | __skb_queue_head_init(&waiting_sks); |
| 563 | if (node->action_flags & TIPC_WAKEUP_USERS) { | 566 | |
| 567 | if (flags & TIPC_WAKEUP_USERS) | ||
| 564 | skb_queue_splice_init(&node->waiting_sks, &waiting_sks); | 568 | skb_queue_splice_init(&node->waiting_sks, &waiting_sks); |
| 565 | node->action_flags &= ~TIPC_WAKEUP_USERS; | 569 | |
| 566 | } | 570 | if (flags & TIPC_NOTIFY_NODE_DOWN) { |
| 567 | if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { | ||
| 568 | list_replace_init(&node->nsub, &nsub_list); | 571 | list_replace_init(&node->nsub, &nsub_list); |
| 569 | list_replace_init(&node->conn_sks, &conn_sks); | 572 | list_replace_init(&node->conn_sks, &conn_sks); |
| 570 | node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; | ||
| 571 | } | 573 | } |
| 572 | if (node->action_flags & TIPC_NOTIFY_NODE_UP) { | 574 | node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | |
| 573 | node->action_flags &= ~TIPC_NOTIFY_NODE_UP; | 575 | TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP | |
| 574 | addr = node->addr; | 576 | TIPC_NOTIFY_LINK_DOWN | |
| 575 | } | 577 | TIPC_WAKEUP_BCAST_USERS); |
| 576 | node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS; | 578 | |
| 577 | spin_unlock_bh(&node->lock); | 579 | spin_unlock_bh(&node->lock); |
| 578 | 580 | ||
| 579 | while (!skb_queue_empty(&waiting_sks)) | 581 | while (!skb_queue_empty(&waiting_sks)) |
| @@ -588,6 +590,14 @@ void tipc_node_unlock(struct tipc_node *node) | |||
| 588 | if (flags & TIPC_WAKEUP_BCAST_USERS) | 590 | if (flags & TIPC_WAKEUP_BCAST_USERS) |
| 589 | tipc_bclink_wakeup_users(); | 591 | tipc_bclink_wakeup_users(); |
| 590 | 592 | ||
| 591 | if (addr) | 593 | if (flags & TIPC_NOTIFY_NODE_UP) |
| 592 | tipc_named_node_up(addr); | 594 | tipc_named_node_up(addr); |
| 595 | |||
| 596 | if (flags & TIPC_NOTIFY_LINK_UP) | ||
| 597 | tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, | ||
| 598 | TIPC_NODE_SCOPE, link_id, addr); | ||
| 599 | |||
| 600 | if (flags & TIPC_NOTIFY_LINK_DOWN) | ||
| 601 | tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, | ||
| 602 | link_id, addr); | ||
| 593 | } | 603 | } |
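
Note on the net/tipc/node.c changes above: while node->lock is held, the link up/down handlers now only set TIPC_NOTIFY_LINK_UP / TIPC_NOTIFY_LINK_DOWN and record a composite link_id; the actual tipc_nametbl_publish()/tipc_nametbl_withdraw() calls run in tipc_node_unlock() after the flags have been snapshotted and the lock dropped, presumably to keep name-table work out of the spinlocked section. A standalone pthreads sketch of that record-under-lock, act-after-unlock pattern (names are illustrative, not the TIPC API):

```c
#include <pthread.h>
#include <stdio.h>

enum {
	NOTIFY_LINK_UP   = 1 << 0,
	NOTIFY_LINK_DOWN = 1 << 1,
};

struct node {
	pthread_mutex_t lock;
	unsigned int action_flags;
	unsigned int link_id;
};

static void publish(unsigned int link_id)  { printf("publish  link 0x%x\n", link_id); }
static void withdraw(unsigned int link_id) { printf("withdraw link 0x%x\n", link_id); }

/* Under the lock: record what happened, nothing more. */
static void link_up(struct node *n, unsigned int link_id)
{
	pthread_mutex_lock(&n->lock);
	n->action_flags |= NOTIFY_LINK_UP;
	n->link_id = link_id;
	pthread_mutex_unlock(&n->lock);
}

/* On unlock: snapshot and clear the flags, then do the heavy work
 * with the lock already dropped. */
static void node_flush(struct node *n)
{
	unsigned int flags, link_id;

	pthread_mutex_lock(&n->lock);
	flags = n->action_flags;
	link_id = n->link_id;
	n->action_flags = 0;
	pthread_mutex_unlock(&n->lock);

	if (flags & NOTIFY_LINK_UP)
		publish(link_id);
	if (flags & NOTIFY_LINK_DOWN)
		withdraw(link_id);
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	link_up(&n, (2u << 16) | 1u);	/* peer bearer 2, local bearer 1 */
	node_flush(&n);
	return 0;
}
```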
diff --git a/net/tipc/node.h b/net/tipc/node.h index 67513c3c852c..04e91458bb29 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h | |||
| @@ -53,6 +53,7 @@ | |||
| 53 | * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down | 53 | * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down |
| 54 | * TIPC_NOTIFY_NODE_DOWN: notify node is down | 54 | * TIPC_NOTIFY_NODE_DOWN: notify node is down |
| 55 | * TIPC_NOTIFY_NODE_UP: notify node is up | 55 | * TIPC_NOTIFY_NODE_UP: notify node is up |
| 56 | * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type | ||
| 56 | */ | 57 | */ |
| 57 | enum { | 58 | enum { |
| 58 | TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), | 59 | TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), |
| @@ -60,7 +61,9 @@ enum { | |||
| 60 | TIPC_NOTIFY_NODE_DOWN = (1 << 3), | 61 | TIPC_NOTIFY_NODE_DOWN = (1 << 3), |
| 61 | TIPC_NOTIFY_NODE_UP = (1 << 4), | 62 | TIPC_NOTIFY_NODE_UP = (1 << 4), |
| 62 | TIPC_WAKEUP_USERS = (1 << 5), | 63 | TIPC_WAKEUP_USERS = (1 << 5), |
| 63 | TIPC_WAKEUP_BCAST_USERS = (1 << 6) | 64 | TIPC_WAKEUP_BCAST_USERS = (1 << 6), |
| 65 | TIPC_NOTIFY_LINK_UP = (1 << 7), | ||
| 66 | TIPC_NOTIFY_LINK_DOWN = (1 << 8) | ||
| 64 | }; | 67 | }; |
| 65 | 68 | ||
| 66 | /** | 69 | /** |
| @@ -100,6 +103,7 @@ struct tipc_node_bclink { | |||
| 100 | * @working_links: number of working links to node (both active and standby) | 103 | * @working_links: number of working links to node (both active and standby) |
| 101 | * @link_cnt: number of links to node | 104 | * @link_cnt: number of links to node |
| 102 | * @signature: node instance identifier | 105 | * @signature: node instance identifier |
| 106 | * @link_id: local and remote bearer ids of changing link, if any | ||
| 103 | * @nsub: list of "node down" subscriptions monitoring node | 107 | * @nsub: list of "node down" subscriptions monitoring node |
| 104 | * @rcu: rcu struct for tipc_node | 108 | * @rcu: rcu struct for tipc_node |
| 105 | */ | 109 | */ |
| @@ -116,6 +120,7 @@ struct tipc_node { | |||
| 116 | int link_cnt; | 120 | int link_cnt; |
| 117 | int working_links; | 121 | int working_links; |
| 118 | u32 signature; | 122 | u32 signature; |
| 123 | u32 link_id; | ||
| 119 | struct list_head nsub; | 124 | struct list_head nsub; |
| 120 | struct sk_buff_head waiting_sks; | 125 | struct sk_buff_head waiting_sks; |
| 121 | struct list_head conn_sks; | 126 | struct list_head conn_sks; |
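
Note on the new link_id field documented above: node.c fills it as peer_bearer_id << 16 | bearer_id, i.e. peer bearer id in the upper 16 bits and local bearer id in the lower 16; the '& 0xffff' added to the SIOCGETLINKNAME handler further down extracts the local half again. A tiny standalone sketch of that packing (helper names are made up):

```c
#include <stdint.h>
#include <stdio.h>

/* Peer bearer id in the upper 16 bits, local bearer id in the lower 16. */
static uint32_t pack_link_id(uint16_t peer_bearer, uint16_t local_bearer)
{
	return ((uint32_t)peer_bearer << 16) | local_bearer;
}

int main(void)
{
	uint32_t id = pack_link_id(3, 1);

	/* id & 0xffff recovers the local bearer id from the composite value. */
	printf("link_id=0x%08x local=%u peer=%u\n",
	       (unsigned int)id,
	       (unsigned int)(id & 0xffff),
	       (unsigned int)(id >> 16));
	return 0;
}
```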
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 75275c5cf929..51bddc236a15 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -1776,7 +1776,7 @@ int tipc_sk_rcv(struct sk_buff *buf) | |||
| 1776 | sk = &tsk->sk; | 1776 | sk = &tsk->sk; |
| 1777 | 1777 | ||
| 1778 | /* Queue message */ | 1778 | /* Queue message */ |
| 1779 | bh_lock_sock(sk); | 1779 | spin_lock_bh(&sk->sk_lock.slock); |
| 1780 | 1780 | ||
| 1781 | if (!sock_owned_by_user(sk)) { | 1781 | if (!sock_owned_by_user(sk)) { |
| 1782 | rc = filter_rcv(sk, buf); | 1782 | rc = filter_rcv(sk, buf); |
| @@ -1787,7 +1787,7 @@ int tipc_sk_rcv(struct sk_buff *buf) | |||
| 1787 | if (sk_add_backlog(sk, buf, limit)) | 1787 | if (sk_add_backlog(sk, buf, limit)) |
| 1788 | rc = -TIPC_ERR_OVERLOAD; | 1788 | rc = -TIPC_ERR_OVERLOAD; |
| 1789 | } | 1789 | } |
| 1790 | bh_unlock_sock(sk); | 1790 | spin_unlock_bh(&sk->sk_lock.slock); |
| 1791 | tipc_sk_put(tsk); | 1791 | tipc_sk_put(tsk); |
| 1792 | if (likely(!rc)) | 1792 | if (likely(!rc)) |
| 1793 | return 0; | 1793 | return 0; |
| @@ -2673,7 +2673,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) | |||
| 2673 | case SIOCGETLINKNAME: | 2673 | case SIOCGETLINKNAME: |
| 2674 | if (copy_from_user(&lnr, argp, sizeof(lnr))) | 2674 | if (copy_from_user(&lnr, argp, sizeof(lnr))) |
| 2675 | return -EFAULT; | 2675 | return -EFAULT; |
| 2676 | if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer, | 2676 | if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, |
| 2677 | lnr.linkname, TIPC_MAX_LINK_NAME)) { | 2677 | lnr.linkname, TIPC_MAX_LINK_NAME)) { |
| 2678 | if (copy_to_user(argp, &lnr, sizeof(lnr))) | 2678 | if (copy_to_user(argp, &lnr, sizeof(lnr))) |
| 2679 | return -EFAULT; | 2679 | return -EFAULT; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index cb9f5a44ffad..5839c85075f1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -5927,6 +5927,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) | |||
| 5927 | int err; | 5927 | int err; |
| 5928 | bool need_new_beacon = false; | 5928 | bool need_new_beacon = false; |
| 5929 | int len, i; | 5929 | int len, i; |
| 5930 | u32 cs_count; | ||
| 5930 | 5931 | ||
| 5931 | if (!rdev->ops->channel_switch || | 5932 | if (!rdev->ops->channel_switch || |
| 5932 | !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) | 5933 | !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) |
| @@ -5963,7 +5964,14 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) | |||
| 5963 | if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES]) | 5964 | if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES]) |
| 5964 | return -EINVAL; | 5965 | return -EINVAL; |
| 5965 | 5966 | ||
| 5966 | params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); | 5967 | /* Even though the attribute is u32, the specification says |
| 5968 | * u8, so let's make sure we don't overflow. | ||
| 5969 | */ | ||
| 5970 | cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); | ||
| 5971 | if (cs_count > 255) | ||
| 5972 | return -EINVAL; | ||
| 5973 | |||
| 5974 | params.count = cs_count; | ||
| 5967 | 5975 | ||
| 5968 | if (!need_new_beacon) | 5976 | if (!need_new_beacon) |
| 5969 | goto skip_beacons; | 5977 | goto skip_beacons; |
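
Note on the nl80211.c change above: the channel-switch count arrives as a u32 netlink attribute but the specification treats it as an 8-bit value, so anything above 255 is now rejected instead of being silently truncated into params.count. A standalone sketch of that validate-before-narrowing check (types and names are illustrative, not the nl80211 API):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct csa_params {
	uint8_t count;	/* the spec-level field is only 8 bits wide */
};

/* Reject out-of-range input before narrowing, instead of truncating. */
static int set_cs_count(struct csa_params *p, uint32_t wire_value)
{
	if (wire_value > 255)
		return -EINVAL;
	p->count = (uint8_t)wire_value;
	return 0;
}

int main(void)
{
	struct csa_params p = { 0 };
	int rc;

	rc = set_cs_count(&p, 300);
	printf("300 -> %d\n", rc);			/* rejected, count untouched */
	rc = set_cs_count(&p, 10);
	printf(" 10 -> %d (count=%u)\n", rc, p.count);	/* accepted */
	return 0;
}
```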
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 499d6c18a8ce..7c532856b398 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
| @@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb) | |||
| 157 | kfree_skb(skb); | 157 | kfree_skb(skb); |
| 158 | if (IS_ERR(segs)) | 158 | if (IS_ERR(segs)) |
| 159 | return PTR_ERR(segs); | 159 | return PTR_ERR(segs); |
| 160 | if (segs == NULL) | ||
| 161 | return -EINVAL; | ||
| 160 | 162 | ||
| 161 | do { | 163 | do { |
| 162 | struct sk_buff *nskb = segs->next; | 164 | struct sk_buff *nskb = segs->next; |
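
Note on the xfrm_output.c change above: the segmentation result was only checked with IS_ERR(), yet it is dereferenced in the loop that follows, so a NULL return is now treated as -EINVAL as well. A standalone sketch of checking both outcomes before walking the list (the ERR_PTR helpers below are userspace stand-ins for the kernel ones):

```c
#include <errno.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

struct seg {
	struct seg *next;
	int len;
};

/* Like the call site above in this respect: the result may be a list,
 * an error pointer, or NULL. */
static struct seg *segment(int mode)
{
	static struct seg one = { NULL, 1500 };

	if (mode < 0)
		return ERR_PTR(-EINVAL);
	if (mode == 0)
		return NULL;
	return &one;
}

static int output(int mode)
{
	struct seg *segs = segment(mode);

	if (IS_ERR(segs))
		return (int)PTR_ERR(segs);
	if (segs == NULL)	/* the check the hunk adds: never walk a NULL list */
		return -EINVAL;

	for (; segs; segs = segs->next)
		printf("segment len=%d\n", segs->len);
	return 0;
}

int main(void)
{
	printf("err path:  %d\n", output(-1));
	printf("null path: %d\n", output(0));
	printf("ok path:   %d\n", output(1));
	return 0;
}
```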
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4c4e457e7888..88bf289abdc9 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) | |||
| 1962 | struct xfrm_policy *pol = xdst->pols[0]; | 1962 | struct xfrm_policy *pol = xdst->pols[0]; |
| 1963 | struct xfrm_policy_queue *pq = &pol->polq; | 1963 | struct xfrm_policy_queue *pq = &pol->polq; |
| 1964 | 1964 | ||
| 1965 | if (unlikely(skb_fclone_busy(skb))) { | 1965 | if (unlikely(skb_fclone_busy(sk, skb))) { |
| 1966 | kfree_skb(skb); | 1966 | kfree_skb(skb); |
| 1967 | return 0; | 1967 | return 0; |
| 1968 | } | 1968 | } |
