Diffstat (limited to 'net')

 64 files changed, 1040 insertions(+), 416 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index 6272420a721b..99815b5454bf 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -6,7 +6,7 @@ menuconfig NET
 	bool "Networking support"
 	select NLATTR
 	select GENERIC_NET_UTILS
-	select ANON_INODES
+	select BPF
 	---help---
 	  Unless you really know what you are doing, you should say Y here.
 	  The reason is that some programs need kernel networking support even
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 992ec49a96aa..44cb786b925a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 
 	kfree_skb(skb);
 }
+EXPORT_SYMBOL_GPL(br_deliver);
 
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
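The new EXPORT_SYMBOL_GPL makes br_deliver() callable from modules; the nft_reject_bridge changes further down rely on it to inject locally built replies out of the bridge port the original frame arrived on. A minimal sketch of that calling pattern (the helper name deliver_reply is hypothetical; the caller must be under rcu_read_lock(), as br_port_get_rcu() implies):

    /* Hypothetical helper: send a locally crafted reply back through
     * the bridge port that received the offending frame. */
    static void deliver_reply(struct sk_buff *oldskb, struct sk_buff *nskb)
    {
            /* valid only under rcu_read_lock() */
            br_deliver(br_port_get_rcu(oldskb->dev), nskb);
    }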
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 1bada53bb195..1a4f32c09ad5 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
 
 static int br_parse_ip_options(struct sk_buff *skb)
 {
-	struct ip_options *opt;
 	const struct iphdr *iph;
 	struct net_device *dev = skb->dev;
 	u32 len;
@@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb)
 		goto inhdr_error;
 
 	iph = ip_hdr(skb);
-	opt = &(IPCB(skb)->opt);
 
 	/* Basic sanity checks */
 	if (iph->ihl < 5 || iph->version != 4)
@@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb)
 	}
 
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-	if (iph->ihl == 5)
-		return 0;
-
-	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
-	if (ip_options_compile(dev_net(dev), opt, skb))
-		goto inhdr_error;
-
-	/* Check correct handling of SRR option */
-	if (unlikely(opt->srr)) {
-		struct in_device *in_dev = __in_dev_get_rcu(dev);
-		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
-			goto drop;
-
-		if (ip_options_rcv_srr(skb))
-			goto drop;
-	}
-
+	/* We should really parse IP options here but until
+	 * somebody who actually uses IP options complains to
+	 * us we'll just silently ignore the options because
+	 * we're lazy!
+	 */
 	return 0;
 
 inhdr_error:
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index da17a5eab8b4..074c557ab505 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = {
 	.type		= NFT_CHAIN_T_DEFAULT,
 	.family		= NFPROTO_BRIDGE,
 	.owner		= THIS_MODULE,
-	.hook_mask	= (1 << NF_BR_LOCAL_IN) |
+	.hook_mask	= (1 << NF_BR_PRE_ROUTING) |
+			  (1 << NF_BR_LOCAL_IN) |
 			  (1 << NF_BR_FORWARD) |
-			  (1 << NF_BR_LOCAL_OUT),
+			  (1 << NF_BR_LOCAL_OUT) |
+			  (1 << NF_BR_POST_ROUTING),
 };
 
 static int __init nf_tables_bridge_init(void)
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index a76479535df2..48da2c54a69e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -16,6 +16,239 @@
 #include <net/netfilter/nft_reject.h>
 #include <net/netfilter/ipv4/nf_reject.h>
 #include <net/netfilter/ipv6/nf_reject.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/ip6_checksum.h>
+#include <linux/netfilter_bridge.h>
+#include "../br_private.h"
+
+static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
+					struct sk_buff *nskb)
+{
+	struct ethhdr *eth;
+
+	eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
+	skb_reset_mac_header(nskb);
+	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
+	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
+	eth->h_proto = eth_hdr(oldskb)->h_proto;
+	skb_pull(nskb, ETH_HLEN);
+}
+
+static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
+{
+	struct iphdr *iph;
+	u32 len;
+
+	if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
+		return 0;
+
+	iph = ip_hdr(oldskb);
+	if (iph->ihl < 5 || iph->version != 4)
+		return 0;
+
+	len = ntohs(iph->tot_len);
+	if (oldskb->len < len)
+		return 0;
+	else if (len < (iph->ihl*4))
+		return 0;
+
+	if (!pskb_may_pull(oldskb, iph->ihl*4))
+		return 0;
+
+	return 1;
+}
+
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+{
+	struct sk_buff *nskb;
+	struct iphdr *niph;
+	const struct tcphdr *oth;
+	struct tcphdr _oth;
+
+	if (!nft_reject_iphdr_validate(oldskb))
+		return;
+
+	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+	if (!oth)
+		return;
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+			 LL_MAX_HEADER, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+				   sysctl_ip_default_ttl);
+	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+	niph->ttl	= sysctl_ip_default_ttl;
+	niph->tot_len	= htons(nskb->len);
+	ip_send_check(niph);
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
+					  u8 code)
+{
+	struct sk_buff *nskb;
+	struct iphdr *niph;
+	struct icmphdr *icmph;
+	unsigned int len;
+	void *payload;
+	__wsum csum;
+
+	if (!nft_reject_iphdr_validate(oldskb))
+		return;
+
+	/* IP header checks: fragment. */
+	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+		return;
+
+	/* RFC says return as much as we can without exceeding 576 bytes. */
+	len = min_t(unsigned int, 536, oldskb->len);
+
+	if (!pskb_may_pull(oldskb, len))
+		return;
+
+	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+		return;
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
+			 LL_MAX_HEADER + len, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
+				   sysctl_ip_default_ttl);
+
+	skb_reset_transport_header(nskb);
+	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
+	memset(icmph, 0, sizeof(*icmph));
+	icmph->type	= ICMP_DEST_UNREACH;
+	icmph->code	= code;
+
+	payload = skb_put(nskb, len);
+	memcpy(payload, skb_network_header(oldskb), len);
+
+	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
+	icmph->checksum = csum_fold(csum);
+
+	niph->tot_len	= htons(nskb->len);
+	ip_send_check(niph);
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
+{
+	struct ipv6hdr *hdr;
+	u32 pkt_len;
+
+	if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
+		return 0;
+
+	hdr = ipv6_hdr(oldskb);
+	if (hdr->version != 6)
+		return 0;
+
+	pkt_len = ntohs(hdr->payload_len);
+	if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
+		return 0;
+
+	return 1;
+}
+
+static void nft_reject_br_send_v6_tcp_reset(struct net *net,
+					    struct sk_buff *oldskb, int hook)
+{
+	struct sk_buff *nskb;
+	const struct tcphdr *oth;
+	struct tcphdr _oth;
+	unsigned int otcplen;
+	struct ipv6hdr *nip6h;
+
+	if (!nft_reject_ip6hdr_validate(oldskb))
+		return;
+
+	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
+	if (!oth)
+		return;
+
+	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
+			 LL_MAX_HEADER, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
+				     net->ipv6.devconf_all->hop_limit);
+	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
+	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
+
+static void nft_reject_br_send_v6_unreach(struct net *net,
+					  struct sk_buff *oldskb, int hook,
+					  u8 code)
+{
+	struct sk_buff *nskb;
+	struct ipv6hdr *nip6h;
+	struct icmp6hdr *icmp6h;
+	unsigned int len;
+	void *payload;
+
+	if (!nft_reject_ip6hdr_validate(oldskb))
+		return;
+
+	/* Include "As much of invoking packet as possible without the ICMPv6
+	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
+	 */
+	len = min_t(unsigned int, 1220, oldskb->len);
+
+	if (!pskb_may_pull(oldskb, len))
+		return;
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
+			 LL_MAX_HEADER + len, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	skb_reserve(nskb, LL_MAX_HEADER);
+	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
+				     net->ipv6.devconf_all->hop_limit);
+
+	skb_reset_transport_header(nskb);
+	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
+	memset(icmp6h, 0, sizeof(*icmp6h));
+	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
+	icmp6h->icmp6_code = code;
+
+	payload = skb_put(nskb, len);
+	memcpy(payload, skb_network_header(oldskb), len);
+	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
+
+	icmp6h->icmp6_cksum =
+		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
+				nskb->len - sizeof(struct ipv6hdr),
+				IPPROTO_ICMPV6,
+				csum_partial(icmp6h,
+					     nskb->len - sizeof(struct ipv6hdr),
+					     0));
+
+	nft_reject_br_push_etherhdr(oldskb, nskb);
+
+	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+}
 
 static void nft_reject_bridge_eval(const struct nft_expr *expr,
 				   struct nft_data data[NFT_REG_MAX + 1],
@@ -23,35 +256,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
 	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
+
+	if (is_broadcast_ether_addr(dest) ||
+	    is_multicast_ether_addr(dest))
+		goto out;
 
 	switch (eth_hdr(pkt->skb)->h_proto) {
 	case htons(ETH_P_IP):
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nf_send_unreach(pkt->skb, priv->icmp_code);
+			nft_reject_br_send_v4_unreach(pkt->skb,
+						      pkt->ops->hooknum,
+						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset(pkt->skb, pkt->ops->hooknum);
+			nft_reject_br_send_v4_tcp_reset(pkt->skb,
+							pkt->ops->hooknum);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nf_send_unreach(pkt->skb,
-					nft_reject_icmp_code(priv->icmp_code));
+			nft_reject_br_send_v4_unreach(pkt->skb,
+						      pkt->ops->hooknum,
+						      nft_reject_icmp_code(priv->icmp_code));
 			break;
 		}
 		break;
 	case htons(ETH_P_IPV6):
 		switch (priv->type) {
 		case NFT_REJECT_ICMP_UNREACH:
-			nf_send_unreach6(net, pkt->skb, priv->icmp_code,
-					 pkt->ops->hooknum);
+			nft_reject_br_send_v6_unreach(net, pkt->skb,
						      pkt->ops->hooknum,
+						      priv->icmp_code);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+			nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+							pkt->ops->hooknum);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
-			nf_send_unreach6(net, pkt->skb,
-					 nft_reject_icmpv6_code(priv->icmp_code),
-					 pkt->ops->hooknum);
+			nft_reject_br_send_v6_unreach(net, pkt->skb,
+						      pkt->ops->hooknum,
+						      nft_reject_icmpv6_code(priv->icmp_code));
 			break;
 		}
 		break;
@@ -59,15 +303,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
 		/* No explicit way to reject this protocol, drop it. */
 		break;
 	}
+out:
 	data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
+static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+{
+	struct nft_base_chain *basechain;
+
+	if (chain->flags & NFT_BASE_CHAIN) {
+		basechain = nft_base_chain(chain);
+
+		switch (basechain->ops[0].hooknum) {
+		case NF_BR_PRE_ROUTING:
+		case NF_BR_LOCAL_IN:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+	return 0;
+}
+
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
 				  const struct nft_expr *expr,
 				  const struct nlattr * const tb[])
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	int icmp_code;
+	int icmp_code, err;
+
+	err = nft_reject_bridge_validate_hooks(ctx->chain);
+	if (err < 0)
+		return err;
 
 	if (tb[NFTA_REJECT_TYPE] == NULL)
 		return -EINVAL;
@@ -116,6 +383,13 @@ nla_put_failure:
 	return -1;
 }
 
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+				      const struct nft_expr *expr,
+				      const struct nft_data **data)
+{
+	return nft_reject_bridge_validate_hooks(ctx->chain);
+}
+
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
 	.type		= &nft_reject_bridge_type,
@@ -123,6 +397,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
 	.eval		= nft_reject_bridge_eval,
 	.init		= nft_reject_bridge_init,
 	.dump		= nft_reject_bridge_dump,
+	.validate	= nft_reject_bridge_validate,
 };
 
 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
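The reset builder used by nft_reject_br_send_v4_tcp_reset() follows the usual RFC 793 rule (see the nf_reject_ipv4.c hunks at the end of this diff): if the offending segment carried an ACK, the RST reuses that ack_seq as its own sequence number; otherwise the RST acknowledges everything the segment occupied in sequence space, with SYN and FIN each counting as one. A worked example with assumed numbers, not taken from the patch:

    /* Offending segment: seq = 1000, SYN set, FIN clear, 10 payload bytes,
     * no ACK.  nf_reject_ip_tcphdr_put() then computes:
     *
     *   ack_seq = seq + syn + fin + payload_len
     *           = 1000 + 1  + 0   + 10          = 1011
     */
    tcph->ack_seq = htonl(1000 + 1 + 0 + 10);   /* htonl(1011) */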
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index de6662b14e1f..7e38b729696a 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	struct ceph_crypto_key old_key;
 	void *ticket_buf = NULL;
 	void *tp, *tpend;
+	void **ptp;
 	struct ceph_timespec new_validity;
 	struct ceph_crypto_key new_session_key;
 	struct ceph_buffer *new_ticket_blob;
@@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 			goto out;
 		}
 		tp = ticket_buf;
-		dlen = ceph_decode_32(&tp);
+		ptp = &tp;
+		tpend = *ptp + dlen;
 	} else {
 		/* unencrypted */
-		ceph_decode_32_safe(p, end, dlen, bad);
-		ticket_buf = kmalloc(dlen, GFP_NOFS);
-		if (!ticket_buf) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		tp = ticket_buf;
-		ceph_decode_need(p, end, dlen, bad);
-		ceph_decode_copy(p, ticket_buf, dlen);
+		ptp = p;
+		tpend = end;
 	}
-	tpend = tp + dlen;
+	ceph_decode_32_safe(ptp, tpend, dlen, bad);
 	dout(" ticket blob is %d bytes\n", dlen);
-	ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-	blob_struct_v = ceph_decode_8(&tp);
-	new_secret_id = ceph_decode_64(&tp);
-	ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+	ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
+	blob_struct_v = ceph_decode_8(ptp);
+	new_secret_id = ceph_decode_64(ptp);
+	ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
 	if (ret)
 		goto out;
 
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 62fc5e7a9acf..790fe89d90c0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
 
+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient.  No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+			 const void *buf, unsigned int buf_len)
+{
+	struct scatterlist *sg;
+	const bool is_vmalloc = is_vmalloc_addr(buf);
+	unsigned int off = offset_in_page(buf);
+	unsigned int chunk_cnt = 1;
+	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+	int i;
+	int ret;
+
+	if (buf_len == 0) {
+		memset(sgt, 0, sizeof(*sgt));
+		return -EINVAL;
+	}
+
+	if (is_vmalloc) {
+		chunk_cnt = chunk_len >> PAGE_SHIFT;
+		chunk_len = PAGE_SIZE;
+	}
+
+	if (chunk_cnt > 1) {
+		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+		if (ret)
+			return ret;
+	} else {
+		WARN_ON(chunk_cnt != 1);
+		sg_init_table(prealloc_sg, 1);
+		sgt->sgl = prealloc_sg;
+		sgt->nents = sgt->orig_nents = 1;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+		struct page *page;
+		unsigned int len = min(chunk_len - off, buf_len);
+
+		if (is_vmalloc)
+			page = vmalloc_to_page(buf);
+		else
+			page = virt_to_page(buf);
+
+		sg_set_page(sg, page, len, off);
+
+		off = 0;
+		buf += len;
+		buf_len -= len;
+	}
+	WARN_ON(buf_len != 0);
+
+	return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+	if (sgt->orig_nents > 1)
+		sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
 			    void *dst, size_t *dst_len,
 			    const void *src, size_t src_len)
 {
-	struct scatterlist sg_in[2], sg_out[1];
+	struct scatterlist sg_in[2], prealloc_sg;
+	struct sg_table sg_out;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
 	int ret;
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 
 	*dst_len = src_len + zero_padding;
 
-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	sg_init_table(sg_in, 2);
 	sg_set_buf(&sg_in[0], src, src_len);
 	sg_set_buf(&sg_in[1], pad, zero_padding);
-	sg_init_table(sg_out, 1);
-	sg_set_buf(sg_out, dst, *dst_len);
+	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+	if (ret)
+		goto out_tfm;
+
+	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
+
 	/*
 	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
 		       key, key_len, 1);
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
 		       pad, zero_padding, 1);
 	*/
-	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
 				       src_len + zero_padding);
-	crypto_free_blkcipher(tfm);
-	if (ret < 0)
+	if (ret < 0) {
 		pr_err("ceph_aes_crypt failed %d\n", ret);
+		goto out_sg;
+	}
 	/*
 	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
 		       dst, *dst_len, 1);
 	*/
-	return 0;
+
+out_sg:
+	teardown_sgtable(&sg_out);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 			     const void *src1, size_t src1_len,
 			     const void *src2, size_t src2_len)
 {
-	struct scatterlist sg_in[3], sg_out[1];
+	struct scatterlist sg_in[3], prealloc_sg;
+	struct sg_table sg_out;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
 	int ret;
@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 
 	*dst_len = src1_len + src2_len + zero_padding;
 
-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	sg_init_table(sg_in, 3);
 	sg_set_buf(&sg_in[0], src1, src1_len);
 	sg_set_buf(&sg_in[1], src2, src2_len);
 	sg_set_buf(&sg_in[2], pad, zero_padding);
-	sg_init_table(sg_out, 1);
-	sg_set_buf(sg_out, dst, *dst_len);
+	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+	if (ret)
+		goto out_tfm;
+
+	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
+
 	/*
 	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
 		       key, key_len, 1);
@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
 		       pad, zero_padding, 1);
 	*/
-	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
 				       src1_len + src2_len + zero_padding);
-	crypto_free_blkcipher(tfm);
-	if (ret < 0)
+	if (ret < 0) {
 		pr_err("ceph_aes_crypt2 failed %d\n", ret);
+		goto out_sg;
+	}
 	/*
 	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
 		       dst, *dst_len, 1);
 	*/
-	return 0;
+
+out_sg:
+	teardown_sgtable(&sg_out);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 static int ceph_aes_decrypt(const void *key, int key_len,
 			    void *dst, size_t *dst_len,
 			    const void *src, size_t src_len)
 {
-	struct scatterlist sg_in[1], sg_out[2];
+	struct sg_table sg_in;
+	struct scatterlist sg_out[2], prealloc_sg;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm };
 	char pad[16];
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
-	crypto_blkcipher_setkey((void *)tfm, key, key_len);
-	sg_init_table(sg_in, 1);
 	sg_init_table(sg_out, 2);
-	sg_set_buf(sg_in, src, src_len);
 	sg_set_buf(&sg_out[0], dst, *dst_len);
 	sg_set_buf(&sg_out[1], pad, sizeof(pad));
+	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+	if (ret)
+		goto out_tfm;
 
+	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
 
 	/*
@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
 		       src, src_len, 1);
 	*/
-
-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-	crypto_free_blkcipher(tfm);
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
 	if (ret < 0) {
 		pr_err("ceph_aes_decrypt failed %d\n", ret);
-		return ret;
+		goto out_sg;
 	}
 
 	if (src_len <= *dst_len)
@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
 		       dst, *dst_len, 1);
 	*/
-	return 0;
+
+out_sg:
+	teardown_sgtable(&sg_in);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 			     void *dst2, size_t *dst2_len,
 			     const void *src, size_t src_len)
 {
-	struct scatterlist sg_in[1], sg_out[3];
+	struct sg_table sg_in;
+	struct scatterlist sg_out[3], prealloc_sg;
 	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
 	struct blkcipher_desc desc = { .tfm = tfm };
 	char pad[16];
@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 	if (IS_ERR(tfm))
 		return PTR_ERR(tfm);
 
-	sg_init_table(sg_in, 1);
-	sg_set_buf(sg_in, src, src_len);
 	sg_init_table(sg_out, 3);
 	sg_set_buf(&sg_out[0], dst1, *dst1_len);
 	sg_set_buf(&sg_out[1], dst2, *dst2_len);
 	sg_set_buf(&sg_out[2], pad, sizeof(pad));
+	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+	if (ret)
+		goto out_tfm;
 
 	crypto_blkcipher_setkey((void *)tfm, key, key_len);
 	iv = crypto_blkcipher_crt(tfm)->iv;
 	ivsize = crypto_blkcipher_ivsize(tfm);
-
 	memcpy(iv, aes_iv, ivsize);
 
 	/*
@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
 		       src, src_len, 1);
 	*/
-
-	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-	crypto_free_blkcipher(tfm);
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
 	if (ret < 0) {
 		pr_err("ceph_aes_decrypt failed %d\n", ret);
-		return ret;
+		goto out_sg;
 	}
 
 	if (src_len <= *dst1_len)
@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 		       dst2, *dst2_len, 1);
 	*/
 
-	return 0;
+out_sg:
+	teardown_sgtable(&sg_in);
+out_tfm:
+	crypto_free_blkcipher(tfm);
+	return ret;
 }
 
 
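The point of setup_sgtable() is that ceph_kvmalloc() buffers may live in vmalloc space, which is only virtually contiguous; sg_set_buf() (which relies on virt_to_page()) is not valid there, so each page must be looked up with vmalloc_to_page() and get its own scatterlist entry. A minimal usage sketch of the new helpers, with buf/buf_len as placeholders:

    struct sg_table sgt;
    struct scatterlist prealloc_sg;     /* avoids allocation in the 1-entry case */
    int ret;

    ret = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
    if (ret)
            return ret;
    /* ... hand sgt.sgl to crypto_blkcipher_encrypt()/decrypt() ... */
    teardown_sgtable(&sgt);             /* frees the table only if one was allocated */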
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 559c9f619c20..8d1653caffdb 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 			       IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
-	sock->sk->sk_allocation = GFP_NOFS;
+	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
 
 #ifdef CONFIG_LOCKDEP
 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 
 		return ret;
 	}
+
+	sk_set_memalloc(sock->sk);
+
 	con->sock = sock;
 	return 0;
 }
@@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work)
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
+	unsigned long pflags = current->flags;
 	bool fault;
 
+	current->flags |= PF_MEMALLOC;
+
 	mutex_lock(&con->mutex);
 	while (true) {
 		int ret;
@@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work)
 	con_fault_finish(con);
 
 	con->ops->put(con);
+
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 }
 
 /*
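Taken together, the three messenger.c hunks apply the standard recipe for sockets that must make progress under memory pressure (for example when ceph/rbd backs swap or dirty writeback): mark the socket so its allocations may dip into the PFMEMALLOC reserves, and bracket the worker with PF_MEMALLOC so allocations made on its behalf inherit that right. Sketched as a pattern rather than a full implementation:

    sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
    sk_set_memalloc(sock->sk);          /* socket paths may use reserves */

    /* in the work function: */
    unsigned long pflags = current->flags;

    current->flags |= PF_MEMALLOC;      /* allocations may use reserves */
    /* ... do socket I/O ... */
    tsk_restore_flags(current, pflags, PF_MEMALLOC);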
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f3fc54eac09d..6f164289bde8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd)
 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
 	dout("__remove_osd %p\n", osd);
-	BUG_ON(!list_empty(&osd->o_requests));
-	BUG_ON(!list_empty(&osd->o_linger_requests));
+	WARN_ON(!list_empty(&osd->o_requests));
+	WARN_ON(!list_empty(&osd->o_linger_requests));
 
 	rb_erase(&osd->o_node, &osdc->osds);
 	list_del_init(&osd->o_osd_lru);
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
 		if (list_empty(&req->r_osd_item))
 			req->r_osd = NULL;
 	}
+
+	list_del_init(&req->r_req_lru_item); /* can be on notarget */
 	ceph_osdc_put_request(req);
 }
 
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc,
 	if (req->r_osd) {
 		__cancel_request(req);
 		list_del_init(&req->r_osd_item);
+		list_del_init(&req->r_linger_osd_item);
 		req->r_osd = NULL;
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index b793e3521a36..945bbd001359 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4157,6 +4157,10 @@ EXPORT_SYMBOL(napi_gro_receive);
 
 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 {
+	if (unlikely(skb->pfmemalloc)) {
+		consume_skb(skb);
+		return;
+	}
 	__skb_pull(skb, skb_headlen(skb));
 	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1600aa24d36b..06dfb293e5aa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1036,7 +1036,8 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
 {
 	const struct ethtool_ops *ops = dev->ethtool_ops;
 
-	if (!ops->get_eeprom || !ops->get_eeprom_len)
+	if (!ops->get_eeprom || !ops->get_eeprom_len ||
+	    !ops->get_eeprom_len(dev))
 		return -EOPNOTSUPP;
 
 	return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
@@ -1052,7 +1053,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
 	u8 *data;
 	int ret = 0;
 
-	if (!ops->set_eeprom || !ops->get_eeprom_len)
+	if (!ops->set_eeprom || !ops->get_eeprom_len ||
+	    !ops->get_eeprom_len(dev))
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
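With this change a driver no longer needs to NULL out its EEPROM hooks when no EEPROM is fitted; returning a zero length is enough for the core to report -EOPNOTSUPP. A hypothetical driver callback (foo_* names are illustrative, not from the patch):

    static int foo_get_eeprom_len(struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* 0 means "no EEPROM"; the ethtool core now maps this
             * to -EOPNOTSUPP for both the get and set paths. */
            return priv->has_eeprom ? FOO_EEPROM_SIZE : 0;
    }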
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 61059a05ec95..c16615bfb61e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4070,15 +4070,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 {
 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int thlen = 0;
 
-	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
-		return tcp_hdrlen(skb) + shinfo->gso_size;
+	if (skb->encapsulation) {
+		thlen = skb_inner_transport_header(skb) -
+			skb_transport_header(skb);
 
+		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+			thlen += inner_tcp_hdrlen(skb);
+	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		thlen = tcp_hdrlen(skb);
+	}
 	/* UFO sets gso_size to the size of the fragmentation
 	 * payload, i.e. the size of the L4 (UDP) header is already
 	 * accounted for.
 	 */
-	return shinfo->gso_size;
+	return thlen + shinfo->gso_size;
 }
 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
 
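For tunnelled packets the old code computed tcp_hdrlen() against the outer transport header, which for a UDP tunnel is not a TCP header at all, and ignored the encapsulation headers entirely. A worked example with assumed numbers, not taken from the patch:

    /* Assumed VXLAN-over-IPv4 case: the gap between the outer and the
     * inner transport header spans outer UDP + VXLAN + inner Ethernet
     * + inner IPv4. */
    unsigned int thlen = 8 + 8 + 14 + 20;	/* inner_transport - transport = 50 */

    thlen += 20;				/* inner_tcp_hdrlen(skb) */
    return thlen + 1398;			/* + gso_size = 1468 per segment */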
diff --git a/net/core/tso.c b/net/core/tso.c
index 8c3203c585b0..630b30b4fb53 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -1,6 +1,7 @@
 #include <linux/export.h>
 #include <net/ip.h>
 #include <net/tso.h>
+#include <asm/unaligned.h>
 
 /* Calculate expected number of TX descriptors */
 int tso_count_descs(struct sk_buff *skb)
@@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
 	iph->id = htons(tso->ip_id);
 	iph->tot_len = htons(size + hdr_len - mac_hdr_len);
 	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
-	tcph->seq = htonl(tso->tcp_seq);
+	put_unaligned_be32(tso->tcp_seq, &tcph->seq);
 	tso->ip_id++;
 
 	if (!is_last) {
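tso_build_hdr() writes into a header buffer supplied by the driver, and tcph->seq inside it is not guaranteed to be 4-byte aligned; on strict-alignment architectures a plain 32-bit store can fault. put_unaligned_be32() stores the value byte-safely in network byte order. A minimal illustration (write_seq is a hypothetical helper):

    #include <asm/unaligned.h>

    static void write_seq(char *hdr, int tcp_off, u32 seq)
    {
            struct tcphdr *tcph = (struct tcphdr *)(hdr + tcp_off);

            /* Same result as tcph->seq = htonl(seq), but safe when
             * hdr + tcp_off is not 32-bit aligned. */
            put_unaligned_be32(seq, &tcph->seq);
    }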
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 22f34cf4cb27..6317b41c99b0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -174,8 +174,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 		dst->rcv = brcm_netdev_ops.rcv;
 		break;
 #endif
-	default:
+	case DSA_TAG_PROTO_NONE:
 		break;
+	default:
+		ret = -ENOPROTOOPT;
+		goto out;
 	}
 
 	dst->tag_protocol = drv->tag_protocol;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6d1817449c36..ab03e00ffe8f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
 	/* We could not connect to a designated PHY, so use the switch internal
 	 * MDIO bus instead
 	 */
-	if (!p->phy)
+	if (!p->phy) {
 		p->phy = ds->slave_mii_bus->phy_map[p->port];
-	else
+		phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+				   p->phy_interface);
+	} else {
 		pr_info("attached PHY at address %d [%s]\n",
 			p->phy->addr, p->phy->drv->name);
+	}
 }
 
 int dsa_slave_suspend(struct net_device *slave_dev)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 92db7a69f2b9..8b7fe5b03906 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 
 	encap = SKB_GSO_CB(skb)->encap_level > 0;
 	if (encap)
-		features = skb->dev->hw_enc_features & netif_skb_features(skb);
+		features &= skb->dev->hw_enc_features;
 	SKB_GSO_CB(skb)->encap_level += ihl;
 
 	skb_reset_transport_header(skb);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
 	int err = -ENOSYS;
 	const struct net_offload **offloads;
 
+	udp_tunnel_gro_complete(skb, nhoff);
+
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
 	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
 				   tos, ttl, df, src_port, dst_port, xnet);
 }
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
 static void __exit geneve_cleanup_module(void)
 {
 	destroy_workqueue(geneve_wq);
+	unregister_pernet_subsys(&geneve_net_ops);
 }
 module_exit(geneve_cleanup_module);
 
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index ccda09628de7..bb5947b0ce2d 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
 	greh = (struct gre_base_hdr *)skb_transport_header(skb);
 
-	ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
+	ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
 	if (unlikely(ghl < sizeof(*greh)))
 		goto out;
 
@@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	skb->mac_len = skb_inner_network_offset(skb);
 
 	/* segment inner packet. */
-	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+	enc_features = skb->dev->hw_enc_features & features;
 	segs = skb_mac_gso_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 9eb89f3f0ee4..19419b60cb37 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -146,7 +146,6 @@ evict_again:
 			atomic_inc(&fq->refcnt);
 			spin_unlock(&hb->chain_lock);
 			del_timer_sync(&fq->timer);
-			WARN_ON(atomic_read(&fq->refcnt) != 1);
 			inet_frag_put(fq, f);
 			goto evict_again;
 		}
@@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 	struct inet_frag_bucket *hb;
 
 	hb = get_frag_bucket_locked(fq, f);
-	hlist_del(&fq->list);
+	if (!(fq->flags & INET_FRAG_EVICTED))
+		hlist_del(&fq->list);
 	spin_unlock(&hb->chain_lock);
 }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 88e5ef2c7f51..bc6471d4abcd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
 	 */
 	features = netif_skb_features(skb);
 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-	if (IS_ERR(segs)) {
+	if (IS_ERR_OR_NULL(segs)) {
 		kfree_skb(skb);
 		return -ENOMEM;
 	}
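skb_gso_segment() can return an ERR_PTR() on failure but may also return NULL, and the old IS_ERR() test would let a NULL through to the segment walk that follows. For reference, the generic kernel error-pointer convention the fix relies on:

    #include <linux/err.h>

    void *p = ERR_PTR(-EINVAL);   /* errno encoded in the pointer value */

    IS_ERR(p);              /* true: p lies in the top-errno range */
    IS_ERR(NULL);           /* false: NULL is not an error pointer */
    IS_ERR_OR_NULL(NULL);   /* true: also catches a NULL return */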
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
 	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 		if (!CMSG_OK(msg, cmsg))
 			return -EINVAL;
-#if defined(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
 		if (allow_ipv6 &&
 		    cmsg->cmsg_level == SOL_IPV6 &&
 		    cmsg->cmsg_type == IPV6_PKTINFO) {
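#if defined(CONFIG_IPV6) is only true when IPv6 is built in (=y); when IPv6 is built as a module (=m) the preprocessor symbol is CONFIG_IPV6_MODULE, so the old test silently compiled the IPV6_PKTINFO handling out. IS_ENABLED() checks both spellings:

    /* CONFIG_IPV6=y  -> CONFIG_IPV6 defined
     * CONFIG_IPV6=m  -> CONFIG_IPV6_MODULE defined
     * IS_ENABLED(CONFIG_IPV6) is true in either case. */
    #if IS_ENABLED(CONFIG_IPV6)
    /* IPv6-aware code, compiled for both built-in and modular IPv6 */
    #endif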
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index b023b4eb1a96..1baaa83dfe5c 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -6,48 +6,45 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/module.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/route.h>
 #include <net/dst.h>
 #include <linux/netfilter_ipv4.h>
+#include <net/netfilter/ipv4/nf_reject.h>
 
-/* Send RST reply */
-void nf_send_reset(struct sk_buff *oldskb, int hook)
+const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
+					     struct tcphdr *_oth, int hook)
 {
-	struct sk_buff *nskb;
-	const struct iphdr *oiph;
-	struct iphdr *niph;
 	const struct tcphdr *oth;
-	struct tcphdr _otcph, *tcph;
 
 	/* IP header checks: fragment. */
 	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
-		return;
+		return NULL;
 
 	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
-				 sizeof(_otcph), &_otcph);
+				 sizeof(struct tcphdr), _oth);
 	if (oth == NULL)
-		return;
+		return NULL;
 
 	/* No RST for RST. */
 	if (oth->rst)
-		return;
-
-	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
-		return;
+		return NULL;
 
 	/* Check checksum */
 	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
-		return;
-	oiph = ip_hdr(oldskb);
+		return NULL;
 
-	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
-			 LL_MAX_HEADER, GFP_ATOMIC);
-	if (!nskb)
-		return;
+	return oth;
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
 
-	skb_reserve(nskb, LL_MAX_HEADER);
+struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
+				  const struct sk_buff *oldskb,
+				  __be16 protocol, int ttl)
+{
+	struct iphdr *niph, *oiph = ip_hdr(oldskb);
 
 	skb_reset_network_header(nskb);
 	niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
@@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	niph->tos	= 0;
 	niph->id	= 0;
 	niph->frag_off	= htons(IP_DF);
-	niph->protocol	= IPPROTO_TCP;
+	niph->protocol	= protocol;
 	niph->check	= 0;
 	niph->saddr	= oiph->daddr;
 	niph->daddr	= oiph->saddr;
+	niph->ttl	= ttl;
+
+	nskb->protocol = htons(ETH_P_IP);
+
+	return niph;
+}
+EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
+
+void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
+			     const struct tcphdr *oth)
+{
+	struct iphdr *niph = ip_hdr(nskb);
+	struct tcphdr *tcph;
 
 	skb_reset_transport_header(nskb);
 	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
@@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	tcph->dest	= oth->source;
 	tcph->doff	= sizeof(struct tcphdr) / 4;
 
-	if (oth->ack)
+	if (oth->ack) {
 		tcph->seq = oth->ack_seq;
-	else {
+	} else {
 		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
 				      oldskb->len - ip_hdrlen(oldskb) -
 				      (oth->doff << 2));
@@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
 	nskb->ip_summed = CHECKSUM_PARTIAL;
 	nskb->csum_start = (unsigned char *)tcph - nskb->head;
 	nskb->csum_offset = offsetof(struct tcphdr, check);
+}
+EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
+
+/* Send RST reply */
+void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+	struct sk_buff *nskb;
+	const struct iphdr *oiph;
+	struct iphdr *niph;
+	const struct tcphdr *oth;
+	struct tcphdr _oth;
+
+	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
+	if (!oth)
+		return;
+
+	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+		return;
+
+	oiph = ip_hdr(oldskb);
+
+	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+			 LL_MAX_HEADER, GFP_ATOMIC);
+	if (!nskb)
+		return;
 
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));
 
-	nskb->protocol = htons(ETH_P_IP);
+	skb_reserve(nskb, LL_MAX_HEADER);
+	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
+				   ip4_dst_hoplimit(skb_dst(nskb)));
+	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
+
 	if (ip_route_me_harder(nskb, RTN_UNSPEC))
 		goto free_nskb;
 
94 | niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); | ||
95 | |||
96 | /* "Never happens" */ | 133 | /* "Never happens" */ |
97 | if (nskb->len > dst_mtu(skb_dst(nskb))) | 134 | if (nskb->len > dst_mtu(skb_dst(nskb))) |
98 | goto free_nskb; | 135 | goto free_nskb; |
@@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) | |||
125 | kfree_skb(nskb); | 162 | kfree_skb(nskb); |
126 | } | 163 | } |
127 | EXPORT_SYMBOL_GPL(nf_send_reset); | 164 | EXPORT_SYMBOL_GPL(nf_send_reset); |
165 | |||
166 | MODULE_LICENSE("GPL"); | ||
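
Note on the refactor above: nf_send_reset() is split into three reusable pieces. nf_reject_ip_tcphdr_get() validates the offending segment (fragments, existing RSTs, and bad checksums yield NULL), nf_reject_iphdr_put() writes the mirrored IPv4 header, and nf_reject_ip_tcphdr_put() writes the RST itself. A minimal sketch of how an external caller might compose them; the function name and the fixed TTL are illustrative, not part of this patch:

#include <net/netfilter/ipv4/nf_reject.h>

static struct sk_buff *build_rst_skb(struct sk_buff *oldskb, int hook)
{
	struct tcphdr _oth;
	const struct tcphdr *oth;
	struct sk_buff *nskb;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)		/* fragment, RST, or bad checksum */
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	skb_reserve(nskb, LL_MAX_HEADER);
	nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, 64 /* assumed TTL */);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	return nskb;
}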
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index 1c636d6b5b50..c1023c445920 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c | |||
@@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = { | |||
39 | .eval = nft_masq_ipv4_eval, | 39 | .eval = nft_masq_ipv4_eval, |
40 | .init = nft_masq_init, | 40 | .init = nft_masq_init, |
41 | .dump = nft_masq_dump, | 41 | .dump = nft_masq_dump, |
42 | .validate = nft_masq_validate, | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { | 45 | static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2d4ae469b471..6a2155b02602 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1798,6 +1798,7 @@ local_input: | |||
1798 | no_route: | 1798 | no_route: |
1799 | RT_CACHE_STAT_INC(in_no_route); | 1799 | RT_CACHE_STAT_INC(in_no_route); |
1800 | res.type = RTN_UNREACHABLE; | 1800 | res.type = RTN_UNREACHABLE; |
1801 | res.fi = NULL; | ||
1801 | goto local_input; | 1802 | goto local_input; |
1802 | 1803 | ||
1803 | /* | 1804 | /* |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1bec4e76d88c..39ec0c379545 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt); | |||
2868 | #endif | 2868 | #endif |
2869 | 2869 | ||
2870 | #ifdef CONFIG_TCP_MD5SIG | 2870 | #ifdef CONFIG_TCP_MD5SIG |
2871 | static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; | 2871 | static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); |
2872 | static DEFINE_MUTEX(tcp_md5sig_mutex); | 2872 | static DEFINE_MUTEX(tcp_md5sig_mutex); |
2873 | 2873 | static bool tcp_md5sig_pool_populated = false; | |
2874 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) | ||
2875 | { | ||
2876 | int cpu; | ||
2877 | |||
2878 | for_each_possible_cpu(cpu) { | ||
2879 | struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); | ||
2880 | |||
2881 | if (p->md5_desc.tfm) | ||
2882 | crypto_free_hash(p->md5_desc.tfm); | ||
2883 | } | ||
2884 | free_percpu(pool); | ||
2885 | } | ||
2886 | 2874 | ||
2887 | static void __tcp_alloc_md5sig_pool(void) | 2875 | static void __tcp_alloc_md5sig_pool(void) |
2888 | { | 2876 | { |
2889 | int cpu; | 2877 | int cpu; |
2890 | struct tcp_md5sig_pool __percpu *pool; | ||
2891 | |||
2892 | pool = alloc_percpu(struct tcp_md5sig_pool); | ||
2893 | if (!pool) | ||
2894 | return; | ||
2895 | 2878 | ||
2896 | for_each_possible_cpu(cpu) { | 2879 | for_each_possible_cpu(cpu) { |
2897 | struct crypto_hash *hash; | 2880 | if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) { |
2898 | 2881 | struct crypto_hash *hash; | |
2899 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | ||
2900 | if (IS_ERR_OR_NULL(hash)) | ||
2901 | goto out_free; | ||
2902 | 2882 | ||
2903 | per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; | 2883 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); |
2884 | if (IS_ERR_OR_NULL(hash)) | ||
2885 | return; | ||
2886 | per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; | ||
2887 | } | ||
2904 | } | 2888 | } |
2905 | /* before setting tcp_md5sig_pool, we must commit all writes | 2889 | /* before setting tcp_md5sig_pool_populated, we must commit all writes |
2906 | * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() | 2890 | * to memory. See smp_rmb() in tcp_get_md5sig_pool() |
2907 | */ | 2891 | */ |
2908 | smp_wmb(); | 2892 | smp_wmb(); |
2909 | tcp_md5sig_pool = pool; | 2893 | tcp_md5sig_pool_populated = true; |
2910 | return; | ||
2911 | out_free: | ||
2912 | __tcp_free_md5sig_pool(pool); | ||
2913 | } | 2894 | } |
2914 | 2895 | ||
2915 | bool tcp_alloc_md5sig_pool(void) | 2896 | bool tcp_alloc_md5sig_pool(void) |
2916 | { | 2897 | { |
2917 | if (unlikely(!tcp_md5sig_pool)) { | 2898 | if (unlikely(!tcp_md5sig_pool_populated)) { |
2918 | mutex_lock(&tcp_md5sig_mutex); | 2899 | mutex_lock(&tcp_md5sig_mutex); |
2919 | 2900 | ||
2920 | if (!tcp_md5sig_pool) | 2901 | if (!tcp_md5sig_pool_populated) |
2921 | __tcp_alloc_md5sig_pool(); | 2902 | __tcp_alloc_md5sig_pool(); |
2922 | 2903 | ||
2923 | mutex_unlock(&tcp_md5sig_mutex); | 2904 | mutex_unlock(&tcp_md5sig_mutex); |
2924 | } | 2905 | } |
2925 | return tcp_md5sig_pool != NULL; | 2906 | return tcp_md5sig_pool_populated; |
2926 | } | 2907 | } |
2927 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | 2908 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
2928 | 2909 | ||
@@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | |||
2936 | */ | 2917 | */ |
2937 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | 2918 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) |
2938 | { | 2919 | { |
2939 | struct tcp_md5sig_pool __percpu *p; | ||
2940 | |||
2941 | local_bh_disable(); | 2920 | local_bh_disable(); |
2942 | p = ACCESS_ONCE(tcp_md5sig_pool); | ||
2943 | if (p) | ||
2944 | return raw_cpu_ptr(p); | ||
2945 | 2921 | ||
2922 | if (tcp_md5sig_pool_populated) { | ||
2923 | /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ | ||
2924 | smp_rmb(); | ||
2925 | return this_cpu_ptr(&tcp_md5sig_pool); | ||
2926 | } | ||
2946 | local_bh_enable(); | 2927 | local_bh_enable(); |
2947 | return NULL; | 2928 | return NULL; |
2948 | } | 2929 | } |
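
Note on the tcp.c hunk above: the dynamically allocated percpu pool pointer becomes a statically defined percpu pool plus a "populated" flag, guarded by a paired smp_wmb()/smp_rmb(). This is the standard publish/consume idiom; a generic sketch with illustrative names (struct state, publish, consume are not kernel APIs, kernel context assumed for the barriers):

struct state { int a, b; };

static struct state shared;
static bool ready;

static void publish(void)
{
	shared.a = 1;	/* initialise the data completely ... */
	shared.b = 2;
	smp_wmb();	/* ... before the flag can be observed as true */
	ready = true;
}

static struct state *consume(void)
{
	if (!ready)
		return NULL;
	smp_rmb();	/* pairs with smp_wmb(): data reads follow flag read */
	return &shared;
}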
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a12b455928e5..88fa2d160685 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) | |||
2315 | 2315 | ||
2316 | /* Undo procedures. */ | 2316 | /* Undo procedures. */ |
2317 | 2317 | ||
2318 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
2319 | * window. It would seem that it is trivially available for us in | ||
2320 | * tp->retrans_out, however, that kind of assumption doesn't consider | ||
2321 | * what will happen if errors occur when sending retransmission for the | ||
2322 | * second time. ...It could be that such a segment has only | ||
2323 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
2324 | * the head skb is enough except for some reneging corner cases that | ||
2325 | * are not worth the effort. | ||
2326 | * | ||
2327 | * Main reason for all this complexity is the fact that connection dying | ||
2328 | * time now depends on the validity of the retrans_stamp, in particular, | ||
2329 | * that successive retransmissions of a segment must not advance | ||
2330 | * retrans_stamp under any conditions. | ||
2331 | */ | ||
2332 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
2333 | { | ||
2334 | const struct tcp_sock *tp = tcp_sk(sk); | ||
2335 | struct sk_buff *skb; | ||
2336 | |||
2337 | if (tp->retrans_out) | ||
2338 | return true; | ||
2339 | |||
2340 | skb = tcp_write_queue_head(sk); | ||
2341 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
2342 | return true; | ||
2343 | |||
2344 | return false; | ||
2345 | } | ||
2346 | |||
2318 | #if FASTRETRANS_DEBUG > 1 | 2347 | #if FASTRETRANS_DEBUG > 1 |
2319 | static void DBGUNDO(struct sock *sk, const char *msg) | 2348 | static void DBGUNDO(struct sock *sk, const char *msg) |
2320 | { | 2349 | { |
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) | |||
2410 | * is ACKed. For Reno it is MUST to prevent false | 2439 | * is ACKed. For Reno it is MUST to prevent false |
2411 | * fast retransmits (RFC2582). SACK TCP is safe. */ | 2440 | * fast retransmits (RFC2582). SACK TCP is safe. */ |
2412 | tcp_moderate_cwnd(tp); | 2441 | tcp_moderate_cwnd(tp); |
2442 | if (!tcp_any_retrans_done(sk)) | ||
2443 | tp->retrans_stamp = 0; | ||
2413 | return true; | 2444 | return true; |
2414 | } | 2445 | } |
2415 | tcp_set_ca_state(sk, TCP_CA_Open); | 2446 | tcp_set_ca_state(sk, TCP_CA_Open); |
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk) | |||
2430 | return false; | 2461 | return false; |
2431 | } | 2462 | } |
2432 | 2463 | ||
2433 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
2434 | * window. It would seem that it is trivially available for us in | ||
2435 | * tp->retrans_out, however, that kind of assumption doesn't consider | ||
2436 | * what will happen if errors occur when sending retransmission for the | ||
2437 | * second time. ...It could be that such a segment has only | ||
2438 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
2439 | * the head skb is enough except for some reneging corner cases that | ||
2440 | * are not worth the effort. | ||
2441 | * | ||
2442 | * Main reason for all this complexity is the fact that connection dying | ||
2443 | * time now depends on the validity of the retrans_stamp, in particular, | ||
2444 | * that successive retransmissions of a segment must not advance | ||
2445 | * retrans_stamp under any conditions. | ||
2446 | */ | ||
2447 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
2448 | { | ||
2449 | const struct tcp_sock *tp = tcp_sk(sk); | ||
2450 | struct sk_buff *skb; | ||
2451 | |||
2452 | if (tp->retrans_out) | ||
2453 | return true; | ||
2454 | |||
2455 | skb = tcp_write_queue_head(sk); | ||
2456 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
2457 | return true; | ||
2458 | |||
2459 | return false; | ||
2460 | } | ||
2461 | |||
2462 | /* Undo during loss recovery after partial ACK or using F-RTO. */ | 2464 | /* Undo during loss recovery after partial ACK or using F-RTO. */ |
2463 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) | 2465 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) |
2464 | { | 2466 | { |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 94d1a7757ff7..9c7d7621466b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
206 | inet->inet_dport = usin->sin_port; | 206 | inet->inet_dport = usin->sin_port; |
207 | inet->inet_daddr = daddr; | 207 | inet->inet_daddr = daddr; |
208 | 208 | ||
209 | inet_set_txhash(sk); | ||
210 | |||
211 | inet_csk(sk)->icsk_ext_hdr_len = 0; | 209 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
212 | if (inet_opt) | 210 | if (inet_opt) |
213 | inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; | 211 | inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; |
@@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
224 | if (err) | 222 | if (err) |
225 | goto failure; | 223 | goto failure; |
226 | 224 | ||
225 | inet_set_txhash(sk); | ||
226 | |||
227 | rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, | 227 | rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, |
228 | inet->inet_sport, inet->inet_dport, sk); | 228 | inet->inet_sport, inet->inet_dport, sk); |
229 | if (IS_ERR(rt)) { | 229 | if (IS_ERR(rt)) { |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3af21296d967..a3d453b94747 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) | |||
2126 | static bool skb_still_in_host_queue(const struct sock *sk, | 2126 | static bool skb_still_in_host_queue(const struct sock *sk, |
2127 | const struct sk_buff *skb) | 2127 | const struct sk_buff *skb) |
2128 | { | 2128 | { |
2129 | if (unlikely(skb_fclone_busy(skb))) { | 2129 | if (unlikely(skb_fclone_busy(sk, skb))) { |
2130 | NET_INC_STATS_BH(sock_net(sk), | 2130 | NET_INC_STATS_BH(sock_net(sk), |
2131 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); | 2131 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); |
2132 | return true; | 2132 | return true; |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 507310ef4b56..6480cea7aa53 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, | |||
58 | skb->encap_hdr_csum = 1; | 58 | skb->encap_hdr_csum = 1; |
59 | 59 | ||
60 | /* segment inner packet. */ | 60 | /* segment inner packet. */ |
61 | enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); | 61 | enc_features = skb->dev->hw_enc_features & features; |
62 | segs = gso_inner_segment(skb, enc_features); | 62 | segs = gso_inner_segment(skb, enc_features); |
63 | if (IS_ERR_OR_NULL(segs)) { | 63 | if (IS_ERR_OR_NULL(segs)) { |
64 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, | 64 | skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, |
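
Note on the one-line udp_offload.c change above: netif_skb_features() re-derives features from the current state of the skb, which at this point describes the outer tunnel header rather than what the caller already negotiated; intersecting the passed-in features with hw_enc_features keeps the caller's restrictions. The same correction appears in the ipv6_gso_segment and mpls_gso_segment hunks below. A sketch of the intended derivation as an illustrative helper (name hypothetical):

static netdev_features_t tunnel_enc_features(const struct sk_buff *skb,
					     netdev_features_t features)
{
	/* Narrow the caller-resolved feature set to what the device can
	 * offload on encapsulated packets; never widen it by recomputing
	 * from the skb.
	 */
	return skb->dev->hw_enc_features & features;
}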
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 725c763270a0..0169ccf5aa4f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -4531,6 +4531,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) | |||
4531 | } | 4531 | } |
4532 | 4532 | ||
4533 | write_unlock_bh(&idev->lock); | 4533 | write_unlock_bh(&idev->lock); |
4534 | inet6_ifinfo_notify(RTM_NEWLINK, idev); | ||
4534 | addrconf_verify_rtnl(); | 4535 | addrconf_verify_rtnl(); |
4535 | return 0; | 4536 | return 0; |
4536 | } | 4537 | } |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 12c3c8ef3849..4564e1fca3eb 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
961 | else | 961 | else |
962 | dev->flags &= ~IFF_POINTOPOINT; | 962 | dev->flags &= ~IFF_POINTOPOINT; |
963 | 963 | ||
964 | dev->iflink = p->link; | ||
965 | |||
966 | /* Precalculate GRE options length */ | 964 | /* Precalculate GRE options length */ |
967 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { | 965 | if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { |
968 | if (t->parms.o_flags&GRE_CSUM) | 966 | if (t->parms.o_flags&GRE_CSUM) |
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
1272 | u64_stats_init(&ip6gre_tunnel_stats->syncp); | 1270 | u64_stats_init(&ip6gre_tunnel_stats->syncp); |
1273 | } | 1271 | } |
1274 | 1272 | ||
1273 | dev->iflink = tunnel->parms.link; | ||
1275 | 1274 | ||
1276 | return 0; | 1275 | return 0; |
1277 | } | 1276 | } |
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev) | |||
1481 | if (!dev->tstats) | 1480 | if (!dev->tstats) |
1482 | return -ENOMEM; | 1481 | return -ENOMEM; |
1483 | 1482 | ||
1483 | dev->iflink = tunnel->parms.link; | ||
1484 | |||
1484 | return 0; | 1485 | return 0; |
1485 | } | 1486 | } |
1486 | 1487 | ||
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 91014d32488d..a071563a7e6e 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -90,7 +90,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
90 | 90 | ||
91 | encap = SKB_GSO_CB(skb)->encap_level > 0; | 91 | encap = SKB_GSO_CB(skb)->encap_level > 0; |
92 | if (encap) | 92 | if (encap) |
93 | features = skb->dev->hw_enc_features & netif_skb_features(skb); | 93 | features &= skb->dev->hw_enc_features; |
94 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); | 94 | SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); |
95 | 95 | ||
96 | ipv6h = ipv6_hdr(skb); | 96 | ipv6h = ipv6_hdr(skb); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9409887fb664..9cb94cfa0ae7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev) | |||
272 | int err; | 272 | int err; |
273 | 273 | ||
274 | t = netdev_priv(dev); | 274 | t = netdev_priv(dev); |
275 | err = ip6_tnl_dev_init(dev); | ||
276 | if (err < 0) | ||
277 | goto out; | ||
278 | 275 | ||
279 | err = register_netdevice(dev); | 276 | err = register_netdevice(dev); |
280 | if (err < 0) | 277 | if (err < 0) |
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) | |||
1462 | 1459 | ||
1463 | 1460 | ||
1464 | static const struct net_device_ops ip6_tnl_netdev_ops = { | 1461 | static const struct net_device_ops ip6_tnl_netdev_ops = { |
1462 | .ndo_init = ip6_tnl_dev_init, | ||
1465 | .ndo_uninit = ip6_tnl_dev_uninit, | 1463 | .ndo_uninit = ip6_tnl_dev_uninit, |
1466 | .ndo_start_xmit = ip6_tnl_xmit, | 1464 | .ndo_start_xmit = ip6_tnl_xmit, |
1467 | .ndo_do_ioctl = ip6_tnl_ioctl, | 1465 | .ndo_do_ioctl = ip6_tnl_ioctl, |
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) | |||
1546 | struct ip6_tnl *t = netdev_priv(dev); | 1544 | struct ip6_tnl *t = netdev_priv(dev); |
1547 | struct net *net = dev_net(dev); | 1545 | struct net *net = dev_net(dev); |
1548 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); | 1546 | struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); |
1549 | int err = ip6_tnl_dev_init_gen(dev); | ||
1550 | |||
1551 | if (err) | ||
1552 | return err; | ||
1553 | 1547 | ||
1554 | t->parms.proto = IPPROTO_IPV6; | 1548 | t->parms.proto = IPPROTO_IPV6; |
1555 | dev_hold(dev); | 1549 | dev_hold(dev); |
1556 | 1550 | ||
1557 | ip6_tnl_link_config(t); | ||
1558 | |||
1559 | rcu_assign_pointer(ip6n->tnls_wc[0], t); | 1551 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
1560 | return 0; | 1552 | return 0; |
1561 | } | 1553 | } |
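
Note: this hunk and the vti6 and sit hunks that follow apply the same pattern. Device setup moves into the net_device_ops .ndo_init callback, which register_netdevice() invokes itself, so the manual init call (and its error unwinding) before registration goes away. A minimal sketch of the pattern with illustrative names:

static int example_dev_init(struct net_device *dev)
{
	/* one-time setup formerly called by hand before registration */
	return 0;
}

static void example_dev_uninit(struct net_device *dev)
{
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_init   = example_dev_init,	/* run inside register_netdevice() */
	.ndo_uninit = example_dev_uninit,
};

static int example_create(struct net_device *dev)
{
	dev->netdev_ops = &example_netdev_ops;
	return register_netdevice(dev);	/* calls ->ndo_init, unwinds on error */
}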
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d440bb585524..31089d153fd3 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev) | |||
172 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | 172 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
173 | int err; | 173 | int err; |
174 | 174 | ||
175 | err = vti6_dev_init(dev); | ||
176 | if (err < 0) | ||
177 | goto out; | ||
178 | |||
179 | err = register_netdevice(dev); | 175 | err = register_netdevice(dev); |
180 | if (err < 0) | 176 | if (err < 0) |
181 | goto out; | 177 | goto out; |
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu) | |||
783 | } | 779 | } |
784 | 780 | ||
785 | static const struct net_device_ops vti6_netdev_ops = { | 781 | static const struct net_device_ops vti6_netdev_ops = { |
782 | .ndo_init = vti6_dev_init, | ||
786 | .ndo_uninit = vti6_dev_uninit, | 783 | .ndo_uninit = vti6_dev_uninit, |
787 | .ndo_start_xmit = vti6_tnl_xmit, | 784 | .ndo_start_xmit = vti6_tnl_xmit, |
788 | .ndo_do_ioctl = vti6_ioctl, | 785 | .ndo_do_ioctl = vti6_ioctl, |
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) | |||
852 | struct ip6_tnl *t = netdev_priv(dev); | 849 | struct ip6_tnl *t = netdev_priv(dev); |
853 | struct net *net = dev_net(dev); | 850 | struct net *net = dev_net(dev); |
854 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); | 851 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
855 | int err = vti6_dev_init_gen(dev); | ||
856 | |||
857 | if (err) | ||
858 | return err; | ||
859 | 852 | ||
860 | t->parms.proto = IPPROTO_IPV6; | 853 | t->parms.proto = IPPROTO_IPV6; |
861 | dev_hold(dev); | 854 | dev_hold(dev); |
862 | 855 | ||
863 | vti6_link_config(t); | ||
864 | |||
865 | rcu_assign_pointer(ip6n->tnls_wc[0], t); | 856 | rcu_assign_pointer(ip6n->tnls_wc[0], t); |
866 | return 0; | 857 | return 0; |
867 | } | 858 | } |
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 5f5f0438d74d..015eb8a80766 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c | |||
@@ -5,121 +5,109 @@ | |||
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | |||
9 | #include <linux/module.h> | ||
8 | #include <net/ipv6.h> | 10 | #include <net/ipv6.h> |
9 | #include <net/ip6_route.h> | 11 | #include <net/ip6_route.h> |
10 | #include <net/ip6_fib.h> | 12 | #include <net/ip6_fib.h> |
11 | #include <net/ip6_checksum.h> | 13 | #include <net/ip6_checksum.h> |
12 | #include <linux/netfilter_ipv6.h> | 14 | #include <linux/netfilter_ipv6.h> |
15 | #include <net/netfilter/ipv6/nf_reject.h> | ||
13 | 16 | ||
14 | void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | 17 | const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, |
18 | struct tcphdr *otcph, | ||
19 | unsigned int *otcplen, int hook) | ||
15 | { | 20 | { |
16 | struct sk_buff *nskb; | ||
17 | struct tcphdr otcph, *tcph; | ||
18 | unsigned int otcplen, hh_len; | ||
19 | int tcphoff, needs_ack; | ||
20 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | 21 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); |
21 | struct ipv6hdr *ip6h; | ||
22 | #define DEFAULT_TOS_VALUE 0x0U | ||
23 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
24 | struct dst_entry *dst = NULL; | ||
25 | u8 proto; | 22 | u8 proto; |
26 | __be16 frag_off; | 23 | __be16 frag_off; |
27 | struct flowi6 fl6; | 24 | int tcphoff; |
28 | |||
29 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | ||
30 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | ||
31 | pr_debug("addr is not unicast.\n"); | ||
32 | return; | ||
33 | } | ||
34 | 25 | ||
35 | proto = oip6h->nexthdr; | 26 | proto = oip6h->nexthdr; |
36 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); | 27 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), |
28 | &proto, &frag_off); | ||
37 | 29 | ||
38 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { | 30 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { |
39 | pr_debug("Cannot get TCP header.\n"); | 31 | pr_debug("Cannot get TCP header.\n"); |
40 | return; | 32 | return NULL; |
41 | } | 33 | } |
42 | 34 | ||
43 | otcplen = oldskb->len - tcphoff; | 35 | *otcplen = oldskb->len - tcphoff; |
44 | 36 | ||
45 | /* IP header checks: fragment, too short. */ | 37 | /* IP header checks: fragment, too short. */ |
46 | if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { | 38 | if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) { |
47 | pr_debug("proto(%d) != IPPROTO_TCP, " | 39 | pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n", |
48 | "or too short. otcplen = %d\n", | 40 | proto, *otcplen); |
49 | proto, otcplen); | 41 | return NULL; |
50 | return; | ||
51 | } | 42 | } |
52 | 43 | ||
53 | if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) | 44 | otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr), |
54 | BUG(); | 45 | otcph); |
46 | if (otcph == NULL) | ||
47 | return NULL; | ||
55 | 48 | ||
56 | /* No RST for RST. */ | 49 | /* No RST for RST. */ |
57 | if (otcph.rst) { | 50 | if (otcph->rst) { |
58 | pr_debug("RST is set\n"); | 51 | pr_debug("RST is set\n"); |
59 | return; | 52 | return NULL; |
60 | } | 53 | } |
61 | 54 | ||
62 | /* Check checksum. */ | 55 | /* Check checksum. */ |
63 | if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { | 56 | if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { |
64 | pr_debug("TCP checksum is invalid\n"); | 57 | pr_debug("TCP checksum is invalid\n"); |
65 | return; | 58 | return NULL; |
66 | } | 59 | } |
67 | 60 | ||
68 | memset(&fl6, 0, sizeof(fl6)); | 61 | return otcph; |
69 | fl6.flowi6_proto = IPPROTO_TCP; | 62 | } |
70 | fl6.saddr = oip6h->daddr; | 63 | EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get); |
71 | fl6.daddr = oip6h->saddr; | ||
72 | fl6.fl6_sport = otcph.dest; | ||
73 | fl6.fl6_dport = otcph.source; | ||
74 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | ||
75 | dst = ip6_route_output(net, NULL, &fl6); | ||
76 | if (dst == NULL || dst->error) { | ||
77 | dst_release(dst); | ||
78 | return; | ||
79 | } | ||
80 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); | ||
81 | if (IS_ERR(dst)) | ||
82 | return; | ||
83 | |||
84 | hh_len = (dst->dev->hard_header_len + 15)&~15; | ||
85 | nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) | ||
86 | + sizeof(struct tcphdr) + dst->trailer_len, | ||
87 | GFP_ATOMIC); | ||
88 | |||
89 | if (!nskb) { | ||
90 | net_dbg_ratelimited("cannot alloc skb\n"); | ||
91 | dst_release(dst); | ||
92 | return; | ||
93 | } | ||
94 | |||
95 | skb_dst_set(nskb, dst); | ||
96 | 64 | ||
97 | skb_reserve(nskb, hh_len + dst->header_len); | 65 | struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, |
66 | const struct sk_buff *oldskb, | ||
67 | __be16 protocol, int hoplimit) | ||
68 | { | ||
69 | struct ipv6hdr *ip6h; | ||
70 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | ||
71 | #define DEFAULT_TOS_VALUE 0x0U | ||
72 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
98 | 73 | ||
99 | skb_put(nskb, sizeof(struct ipv6hdr)); | 74 | skb_put(nskb, sizeof(struct ipv6hdr)); |
100 | skb_reset_network_header(nskb); | 75 | skb_reset_network_header(nskb); |
101 | ip6h = ipv6_hdr(nskb); | 76 | ip6h = ipv6_hdr(nskb); |
102 | ip6_flow_hdr(ip6h, tclass, 0); | 77 | ip6_flow_hdr(ip6h, tclass, 0); |
103 | ip6h->hop_limit = ip6_dst_hoplimit(dst); | 78 | ip6h->hop_limit = hoplimit; |
104 | ip6h->nexthdr = IPPROTO_TCP; | 79 | ip6h->nexthdr = protocol; |
105 | ip6h->saddr = oip6h->daddr; | 80 | ip6h->saddr = oip6h->daddr; |
106 | ip6h->daddr = oip6h->saddr; | 81 | ip6h->daddr = oip6h->saddr; |
107 | 82 | ||
83 | nskb->protocol = htons(ETH_P_IPV6); | ||
84 | |||
85 | return ip6h; | ||
86 | } | ||
87 | EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put); | ||
88 | |||
89 | void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, | ||
90 | const struct sk_buff *oldskb, | ||
91 | const struct tcphdr *oth, unsigned int otcplen) | ||
92 | { | ||
93 | struct tcphdr *tcph; | ||
94 | int needs_ack; | ||
95 | |||
108 | skb_reset_transport_header(nskb); | 96 | skb_reset_transport_header(nskb); |
109 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); | 97 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); |
110 | /* Truncate to length (no data) */ | 98 | /* Truncate to length (no data) */ |
111 | tcph->doff = sizeof(struct tcphdr)/4; | 99 | tcph->doff = sizeof(struct tcphdr)/4; |
112 | tcph->source = otcph.dest; | 100 | tcph->source = oth->dest; |
113 | tcph->dest = otcph.source; | 101 | tcph->dest = oth->source; |
114 | 102 | ||
115 | if (otcph.ack) { | 103 | if (oth->ack) { |
116 | needs_ack = 0; | 104 | needs_ack = 0; |
117 | tcph->seq = otcph.ack_seq; | 105 | tcph->seq = oth->ack_seq; |
118 | tcph->ack_seq = 0; | 106 | tcph->ack_seq = 0; |
119 | } else { | 107 | } else { |
120 | needs_ack = 1; | 108 | needs_ack = 1; |
121 | tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin | 109 | tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + |
122 | + otcplen - (otcph.doff<<2)); | 110 | otcplen - (oth->doff<<2)); |
123 | tcph->seq = 0; | 111 | tcph->seq = 0; |
124 | } | 112 | } |
125 | 113 | ||
@@ -137,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
137 | sizeof(struct tcphdr), IPPROTO_TCP, | 125 | sizeof(struct tcphdr), IPPROTO_TCP, |
138 | csum_partial(tcph, | 126 | csum_partial(tcph, |
139 | sizeof(struct tcphdr), 0)); | 127 | sizeof(struct tcphdr), 0)); |
128 | } | ||
129 | EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put); | ||
130 | |||
131 | void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | ||
132 | { | ||
133 | struct sk_buff *nskb; | ||
134 | struct tcphdr _otcph; | ||
135 | const struct tcphdr *otcph; | ||
136 | unsigned int otcplen, hh_len; | ||
137 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | ||
138 | struct ipv6hdr *ip6h; | ||
139 | struct dst_entry *dst = NULL; | ||
140 | struct flowi6 fl6; | ||
141 | |||
142 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | ||
143 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | ||
144 | pr_debug("addr is not unicast.\n"); | ||
145 | return; | ||
146 | } | ||
147 | |||
148 | otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook); | ||
149 | if (!otcph) | ||
150 | return; | ||
151 | |||
152 | memset(&fl6, 0, sizeof(fl6)); | ||
153 | fl6.flowi6_proto = IPPROTO_TCP; | ||
154 | fl6.saddr = oip6h->daddr; | ||
155 | fl6.daddr = oip6h->saddr; | ||
156 | fl6.fl6_sport = otcph->dest; | ||
157 | fl6.fl6_dport = otcph->source; | ||
158 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | ||
159 | dst = ip6_route_output(net, NULL, &fl6); | ||
160 | if (dst == NULL || dst->error) { | ||
161 | dst_release(dst); | ||
162 | return; | ||
163 | } | ||
164 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); | ||
165 | if (IS_ERR(dst)) | ||
166 | return; | ||
167 | |||
168 | hh_len = (dst->dev->hard_header_len + 15)&~15; | ||
169 | nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) | ||
170 | + sizeof(struct tcphdr) + dst->trailer_len, | ||
171 | GFP_ATOMIC); | ||
172 | |||
173 | if (!nskb) { | ||
174 | net_dbg_ratelimited("cannot alloc skb\n"); | ||
175 | dst_release(dst); | ||
176 | return; | ||
177 | } | ||
178 | |||
179 | skb_dst_set(nskb, dst); | ||
180 | |||
181 | skb_reserve(nskb, hh_len + dst->header_len); | ||
182 | ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, | ||
183 | ip6_dst_hoplimit(dst)); | ||
184 | nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen); | ||
140 | 185 | ||
141 | nf_ct_attach(nskb, oldskb); | 186 | nf_ct_attach(nskb, oldskb); |
142 | 187 | ||
@@ -161,3 +206,5 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | |||
161 | ip6_local_out(nskb); | 206 | ip6_local_out(nskb); |
162 | } | 207 | } |
163 | EXPORT_SYMBOL_GPL(nf_send_reset6); | 208 | EXPORT_SYMBOL_GPL(nf_send_reset6); |
209 | |||
210 | MODULE_LICENSE("GPL"); | ||
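
Note on the ack_seq computation in nf_reject_ip6_tcphdr_put() (and its IPv4 twin earlier): the RST acknowledges the sequence number of the offending segment plus everything it consumed, where SYN and FIN each count as one octet of sequence space and the payload length is otcplen minus the header length (oth->doff << 2). A worked example with illustrative numbers:

/* Incoming SYN, seq = 1000, no payload (otcplen == oth->doff << 2):
 *
 *   ack_seq = 1000 + syn(1) + fin(0) + payload(0) = 1001
 *
 * so the RST acknowledges exactly the sequence space the segment
 * consumed, as RFC 793 requires for a RST answering a SYN.
 */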
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 556262f40761..8a7ac685076d 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c | |||
@@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = { | |||
39 | .eval = nft_masq_ipv6_eval, | 39 | .eval = nft_masq_ipv6_eval, |
40 | .init = nft_masq_init, | 40 | .init = nft_masq_init, |
41 | .dump = nft_masq_dump, | 41 | .dump = nft_masq_dump, |
42 | .validate = nft_masq_validate, | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { | 45 | static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { |
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index fc24c390af05..97f41a3e68d9 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c | |||
@@ -3,11 +3,45 @@ | |||
3 | * not configured or static. These functions are needed by GSO/GRO implementation. | 3 | * not configured or static. These functions are needed by GSO/GRO implementation. |
4 | */ | 4 | */ |
5 | #include <linux/export.h> | 5 | #include <linux/export.h> |
6 | #include <net/ip.h> | ||
6 | #include <net/ipv6.h> | 7 | #include <net/ipv6.h> |
7 | #include <net/ip6_fib.h> | 8 | #include <net/ip6_fib.h> |
8 | #include <net/addrconf.h> | 9 | #include <net/addrconf.h> |
9 | #include <net/secure_seq.h> | 10 | #include <net/secure_seq.h> |
10 | 11 | ||
12 | /* This function exists only for tap drivers that must support broken | ||
13 | * clients requesting UFO without specifying an IPv6 fragment ID. | ||
14 | * | ||
15 | * This is similar to ipv6_select_ident() but we use an independent hash | ||
16 | * seed to limit information leakage. | ||
17 | * | ||
18 | * The network header must be set before calling this. | ||
19 | */ | ||
20 | void ipv6_proxy_select_ident(struct sk_buff *skb) | ||
21 | { | ||
22 | static u32 ip6_proxy_idents_hashrnd __read_mostly; | ||
23 | struct in6_addr buf[2]; | ||
24 | struct in6_addr *addrs; | ||
25 | u32 hash, id; | ||
26 | |||
27 | addrs = skb_header_pointer(skb, | ||
28 | skb_network_offset(skb) + | ||
29 | offsetof(struct ipv6hdr, saddr), | ||
30 | sizeof(buf), buf); | ||
31 | if (!addrs) | ||
32 | return; | ||
33 | |||
34 | net_get_random_once(&ip6_proxy_idents_hashrnd, | ||
35 | sizeof(ip6_proxy_idents_hashrnd)); | ||
36 | |||
37 | hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); | ||
38 | hash = __ipv6_addr_jhash(&addrs[0], hash); | ||
39 | |||
40 | id = ip_idents_reserve(hash, 1); | ||
41 | skb_shinfo(skb)->ip6_frag_id = htonl(id); | ||
42 | } | ||
43 | EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); | ||
44 | |||
11 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) | 45 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) |
12 | { | 46 | { |
13 | u16 offset = sizeof(struct ipv6hdr); | 47 | u16 offset = sizeof(struct ipv6hdr); |
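
Note: ipv6_proxy_select_ident() is exported for tap-style drivers only. A plausible call-site sketch (hypothetical, not part of this patch) for a driver that has just parsed a UFO IPv6 packet from userspace and set the network header:

if (skb->protocol == htons(ETH_P_IPV6) &&
    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) &&
    !skb_shinfo(skb)->ip6_frag_id)
	ipv6_proxy_select_ident(skb);	/* derive id from the independent seed */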
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 58e5b4710127..a24557a1c1d8 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) | |||
195 | struct sit_net *sitn = net_generic(net, sit_net_id); | 195 | struct sit_net *sitn = net_generic(net, sit_net_id); |
196 | int err; | 196 | int err; |
197 | 197 | ||
198 | err = ipip6_tunnel_init(dev); | 198 | memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); |
199 | if (err < 0) | 199 | memcpy(dev->broadcast, &t->parms.iph.daddr, 4); |
200 | goto out; | ||
201 | ipip6_tunnel_clone_6rd(dev, sitn); | ||
202 | 200 | ||
203 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) | 201 | if ((__force u16)t->parms.i_flags & SIT_ISATAP) |
204 | dev->priv_flags |= IFF_ISATAP; | 202 | dev->priv_flags |= IFF_ISATAP; |
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) | |||
207 | if (err < 0) | 205 | if (err < 0) |
208 | goto out; | 206 | goto out; |
209 | 207 | ||
210 | strcpy(t->parms.name, dev->name); | 208 | ipip6_tunnel_clone_6rd(dev, sitn); |
209 | |||
211 | dev->rtnl_link_ops = &sit_link_ops; | 210 | dev->rtnl_link_ops = &sit_link_ops; |
212 | 211 | ||
213 | dev_hold(dev); | 212 | dev_hold(dev); |
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
1330 | } | 1329 | } |
1331 | 1330 | ||
1332 | static const struct net_device_ops ipip6_netdev_ops = { | 1331 | static const struct net_device_ops ipip6_netdev_ops = { |
1332 | .ndo_init = ipip6_tunnel_init, | ||
1333 | .ndo_uninit = ipip6_tunnel_uninit, | 1333 | .ndo_uninit = ipip6_tunnel_uninit, |
1334 | .ndo_start_xmit = sit_tunnel_xmit, | 1334 | .ndo_start_xmit = sit_tunnel_xmit, |
1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, | 1335 | .ndo_do_ioctl = ipip6_tunnel_ioctl, |
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev) | |||
1378 | 1378 | ||
1379 | tunnel->dev = dev; | 1379 | tunnel->dev = dev; |
1380 | tunnel->net = dev_net(dev); | 1380 | tunnel->net = dev_net(dev); |
1381 | 1381 | strcpy(tunnel->parms.name, dev->name); | |
1382 | memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); | ||
1383 | memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); | ||
1384 | 1382 | ||
1385 | ipip6_tunnel_bind_dev(dev); | 1383 | ipip6_tunnel_bind_dev(dev); |
1386 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 1384 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) | |||
1405 | 1403 | ||
1406 | tunnel->dev = dev; | 1404 | tunnel->dev = dev; |
1407 | tunnel->net = dev_net(dev); | 1405 | tunnel->net = dev_net(dev); |
1408 | strcpy(tunnel->parms.name, dev->name); | ||
1409 | 1406 | ||
1410 | iph->version = 4; | 1407 | iph->version = 4; |
1411 | iph->protocol = IPPROTO_IPV6; | 1408 | iph->protocol = IPPROTO_IPV6; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 831495529b82..ace29b60813c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
200 | sk->sk_v6_daddr = usin->sin6_addr; | 200 | sk->sk_v6_daddr = usin->sin6_addr; |
201 | np->flow_label = fl6.flowlabel; | 201 | np->flow_label = fl6.flowlabel; |
202 | 202 | ||
203 | ip6_set_txhash(sk); | ||
204 | |||
205 | /* | 203 | /* |
206 | * TCP over IPv4 | 204 | * TCP over IPv4 |
207 | */ | 205 | */ |
@@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
297 | if (err) | 295 | if (err) |
298 | goto late_failure; | 296 | goto late_failure; |
299 | 297 | ||
298 | ip6_set_txhash(sk); | ||
299 | |||
300 | if (!tp->write_seq && likely(!tp->repair)) | 300 | if (!tp->write_seq && likely(!tp->repair)) |
301 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, | 301 | tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, |
302 | sk->sk_v6_daddr.s6_addr32, | 302 | sk->sk_v6_daddr.s6_addr32, |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ac49f84fe2c3..5f983644373a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
170 | case IPPROTO_DCCP: | 170 | case IPPROTO_DCCP: |
171 | if (!onlyproto && (nh + offset + 4 < skb->data || | 171 | if (!onlyproto && (nh + offset + 4 < skb->data || |
172 | pskb_may_pull(skb, nh + offset + 4 - skb->data))) { | 172 | pskb_may_pull(skb, nh + offset + 4 - skb->data))) { |
173 | __be16 *ports = (__be16 *)exthdr; | 173 | __be16 *ports; |
174 | 174 | ||
175 | nh = skb_network_header(skb); | ||
176 | ports = (__be16 *)(nh + offset); | ||
175 | fl6->fl6_sport = ports[!!reverse]; | 177 | fl6->fl6_sport = ports[!!reverse]; |
176 | fl6->fl6_dport = ports[!reverse]; | 178 | fl6->fl6_dport = ports[!reverse]; |
177 | } | 179 | } |
@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
180 | 182 | ||
181 | case IPPROTO_ICMPV6: | 183 | case IPPROTO_ICMPV6: |
182 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { | 184 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { |
183 | u8 *icmp = (u8 *)exthdr; | 185 | u8 *icmp; |
184 | 186 | ||
187 | nh = skb_network_header(skb); | ||
188 | icmp = (u8 *)(nh + offset); | ||
185 | fl6->fl6_icmp_type = icmp[0]; | 189 | fl6->fl6_icmp_type = icmp[0]; |
186 | fl6->fl6_icmp_code = icmp[1]; | 190 | fl6->fl6_icmp_code = icmp[1]; |
187 | } | 191 | } |
@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
192 | case IPPROTO_MH: | 196 | case IPPROTO_MH: |
193 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { | 197 | if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { |
194 | struct ip6_mh *mh; | 198 | struct ip6_mh *mh; |
195 | mh = (struct ip6_mh *)exthdr; | ||
196 | 199 | ||
200 | nh = skb_network_header(skb); | ||
201 | mh = (struct ip6_mh *)(nh + offset); | ||
197 | fl6->fl6_mh_type = mh->ip6mh_type; | 202 | fl6->fl6_mh_type = mh->ip6mh_type; |
198 | } | 203 | } |
199 | fl6->flowi6_proto = nexthdr; | 204 | fl6->flowi6_proto = nexthdr; |
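
Note: all three xfrm6_policy hunks above fix the same latent bug. pskb_may_pull() may reallocate the skb head, so exthdr, computed before the call, could point into freed memory afterwards. The fix re-derives the pointer from skb_network_header() once the pull has succeeded. The general shape of the safe pattern, as a sketch:

if (pskb_may_pull(skb, nh + offset + 4 - skb->data)) {
	__be16 *ports;

	nh = skb_network_header(skb);	/* re-read: head may have moved */
	ports = (__be16 *)(nh + offset);
	/* ports[0] / ports[1] are now safe to read */
}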
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 92fafd485deb..3f3a6cbdceb7 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -1064,8 +1064,6 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | |||
1064 | 1064 | ||
1065 | if (sk->sk_state != TCP_ESTABLISHED) { | 1065 | if (sk->sk_state != TCP_ESTABLISHED) { |
1066 | sock->state = SS_UNCONNECTED; | 1066 | sock->state = SS_UNCONNECTED; |
1067 | if (sk->sk_prot->disconnect(sk, flags)) | ||
1068 | sock->state = SS_DISCONNECTING; | ||
1069 | err = sock_error(sk); | 1067 | err = sock_error(sk); |
1070 | if (!err) | 1068 | if (!err) |
1071 | err = -ECONNRESET; | 1069 | err = -ECONNRESET; |
diff --git a/net/mpls/Makefile b/net/mpls/Makefile index 0a3c171be537..6dec088c2d0f 100644 --- a/net/mpls/Makefile +++ b/net/mpls/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | # | 1 | # |
2 | # Makefile for MPLS. | 2 | # Makefile for MPLS. |
3 | # | 3 | # |
4 | obj-y += mpls_gso.o | 4 | obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o |
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index e28ed2ef5b06..e3545f21a099 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c | |||
@@ -48,7 +48,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, | |||
48 | __skb_push(skb, skb->mac_len); | 48 | __skb_push(skb, skb->mac_len); |
49 | 49 | ||
50 | /* Segment inner packet. */ | 50 | /* Segment inner packet. */ |
51 | mpls_features = skb->dev->mpls_features & netif_skb_features(skb); | 51 | mpls_features = skb->dev->mpls_features & features; |
52 | segs = skb_mac_gso_segment(skb, mpls_features); | 52 | segs = skb_mac_gso_segment(skb, mpls_features); |
53 | 53 | ||
54 | 54 | ||
@@ -59,8 +59,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, | |||
59 | * above pulled. It will be re-pushed after returning | 59 | * above pulled. It will be re-pushed after returning |
60 | * skb_mac_gso_segment(), an indirect caller of this function. | 60 | * skb_mac_gso_segment(), an indirect caller of this function. |
61 | */ | 61 | */ |
62 | __skb_push(skb, skb->data - skb_mac_header(skb)); | 62 | __skb_pull(skb, skb->data - skb_mac_header(skb)); |
63 | |||
64 | out: | 63 | out: |
65 | return segs; | 64 | return segs; |
66 | } | 65 | } |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 912e5a05b79d..86f9d76b1464 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -659,7 +659,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) | |||
659 | struct ip_set *set; | 659 | struct ip_set *set; |
660 | struct ip_set_net *inst = ip_set_pernet(net); | 660 | struct ip_set_net *inst = ip_set_pernet(net); |
661 | 661 | ||
662 | if (index > inst->ip_set_max) | 662 | if (index >= inst->ip_set_max) |
663 | return IPSET_INVALID_ID; | 663 | return IPSET_INVALID_ID; |
664 | 664 | ||
665 | nfnl_lock(NFNL_SUBSYS_IPSET); | 665 | nfnl_lock(NFNL_SUBSYS_IPSET); |
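
Note: the comparison change above is a plain off-by-one fix. Set ids index a zero-based array of ip_set_max slots, so the last valid id is ip_set_max - 1, and "index > inst->ip_set_max" wrongly accepted index == ip_set_max. An illustrative helper spelling out the invariant:

static bool ip_set_id_in_range(ip_set_id_t index, ip_set_id_t max)
{
	return index < max;	/* valid slots are 0 .. max - 1 */
}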
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 91f17c1eb8a2..437a3663ad03 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, | |||
316 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, | 316 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, |
317 | local))) { | 317 | local))) { |
318 | IP_VS_DBG_RL("We are crossing local and non-local addresses" | 318 | IP_VS_DBG_RL("We are crossing local and non-local addresses" |
319 | " daddr=%pI4\n", &dest->addr.ip); | 319 | " daddr=%pI4\n", &daddr); |
320 | goto err_put; | 320 | goto err_put; |
321 | } | 321 | } |
322 | 322 | ||
@@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, | |||
458 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, | 458 | if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, |
459 | local))) { | 459 | local))) { |
460 | IP_VS_DBG_RL("We are crossing local and non-local addresses" | 460 | IP_VS_DBG_RL("We are crossing local and non-local addresses" |
461 | " daddr=%pI6\n", &dest->addr.in6); | 461 | " daddr=%pI6\n", daddr); |
462 | goto err_put; | 462 | goto err_put; |
463 | } | 463 | } |
464 | 464 | ||
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 44d1ea32570a..d87b6423ffb2 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -213,7 +213,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
213 | { | 213 | { |
214 | /* REPLY */ | 214 | /* REPLY */ |
215 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ | 215 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ |
216 | /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 }, | 216 | /*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 }, |
217 | /* | 217 | /* |
218 | * sNO -> sIV Never reached. | 218 | * sNO -> sIV Never reached. |
219 | * sSS -> sS2 Simultaneous open | 219 | * sSS -> sS2 Simultaneous open |
@@ -223,7 +223,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
223 | * sFW -> sIV | 223 | * sFW -> sIV |
224 | * sCW -> sIV | 224 | * sCW -> sIV |
225 | * sLA -> sIV | 225 | * sLA -> sIV |
226 | * sTW -> sIV Reopened connection, but server may not do it. | 226 | * sTW -> sSS Reopened connection, but server may have switched role |
227 | * sCL -> sIV | 227 | * sCL -> sIV |
228 | */ | 228 | */ |
229 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ | 229 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 556a0dfa4abc..11ab4b078f3b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -1328,10 +1328,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
1328 | basechain->stats = stats; | 1328 | basechain->stats = stats; |
1329 | } else { | 1329 | } else { |
1330 | stats = netdev_alloc_pcpu_stats(struct nft_stats); | 1330 | stats = netdev_alloc_pcpu_stats(struct nft_stats); |
1331 | if (IS_ERR(stats)) { | 1331 | if (stats == NULL) { |
1332 | module_put(type->owner); | 1332 | module_put(type->owner); |
1333 | kfree(basechain); | 1333 | kfree(basechain); |
1334 | return PTR_ERR(stats); | 1334 | return -ENOMEM; |
1335 | } | 1335 | } |
1336 | rcu_assign_pointer(basechain->stats, stats); | 1336 | rcu_assign_pointer(basechain->stats, stats); |
1337 | } | 1337 | } |
@@ -3744,6 +3744,20 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { | |||
3744 | .abort = nf_tables_abort, | 3744 | .abort = nf_tables_abort, |
3745 | }; | 3745 | }; |
3746 | 3746 | ||
3747 | int nft_chain_validate_dependency(const struct nft_chain *chain, | ||
3748 | enum nft_chain_type type) | ||
3749 | { | ||
3750 | const struct nft_base_chain *basechain; | ||
3751 | |||
3752 | if (chain->flags & NFT_BASE_CHAIN) { | ||
3753 | basechain = nft_base_chain(chain); | ||
3754 | if (basechain->type->type != type) | ||
3755 | return -EOPNOTSUPP; | ||
3756 | } | ||
3757 | return 0; | ||
3758 | } | ||
3759 | EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); | ||
3760 | |||
3747 | /* | 3761 | /* |
3748 | * Loop detection - walk through the ruleset beginning at the destination chain | 3762 | * Loop detection - walk through the ruleset beginning at the destination chain |
3749 | * of a new jump until either the source chain is reached (loop) or all | 3763 | * of a new jump until either the source chain is reached (loop) or all |
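
Note: nft_chain_validate_dependency() gives expressions a way to refuse attachment to a base chain of the wrong type. The .validate hooks wired up in the nft_masq_ipv4/ipv6 hunks above presumably reduce to a call like the following sketch (the body of nft_masq_validate is not in this diff, so this is an assumption):

static int nft_masq_validate(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nft_data **data)
{
	/* masquerading only makes sense from a NAT base chain */
	return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
}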
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index b1e3a0579416..5f1be5ba3559 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -43,7 +43,8 @@ | |||
43 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE | 43 | #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE |
44 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ | 44 | #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ |
45 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ | 45 | #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ |
46 | #define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ | 46 | /* max packet size is limited by 16-bit struct nfattr nfa_len field */ |
47 | #define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN) | ||
47 | 48 | ||
48 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ | 49 | #define PRINTR(x, args...) do { if (net_ratelimit()) \ |
49 | printk(x, ## args); } while (0); | 50 | printk(x, ## args); } while (0); |
@@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, | |||
252 | 253 | ||
253 | case NFULNL_COPY_PACKET: | 254 | case NFULNL_COPY_PACKET: |
254 | inst->copy_mode = mode; | 255 | inst->copy_mode = mode; |
256 | if (range == 0) | ||
257 | range = NFULNL_COPY_RANGE_MAX; | ||
255 | inst->copy_range = min_t(unsigned int, | 258 | inst->copy_range = min_t(unsigned int, |
256 | range, NFULNL_COPY_RANGE_MAX); | 259 | range, NFULNL_COPY_RANGE_MAX); |
257 | break; | 260 | break; |
@@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size, | |||
343 | return skb; | 346 | return skb; |
344 | } | 347 | } |
345 | 348 | ||
346 | static int | 349 | static void |
347 | __nfulnl_send(struct nfulnl_instance *inst) | 350 | __nfulnl_send(struct nfulnl_instance *inst) |
348 | { | 351 | { |
349 | int status = -1; | ||
350 | |||
351 | if (inst->qlen > 1) { | 352 | if (inst->qlen > 1) { |
352 | struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, | 353 | struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, |
353 | NLMSG_DONE, | 354 | NLMSG_DONE, |
354 | sizeof(struct nfgenmsg), | 355 | sizeof(struct nfgenmsg), |
355 | 0); | 356 | 0); |
356 | if (!nlh) | 357 | if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", |
358 | inst->skb->len, skb_tailroom(inst->skb))) { | ||
359 | kfree_skb(inst->skb); | ||
357 | goto out; | 360 | goto out; |
361 | } | ||
358 | } | 362 | } |
359 | status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, | 363 | nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, |
360 | MSG_DONTWAIT); | 364 | MSG_DONTWAIT); |
361 | 365 | out: | |
362 | inst->qlen = 0; | 366 | inst->qlen = 0; |
363 | inst->skb = NULL; | 367 | inst->skb = NULL; |
364 | out: | ||
365 | return status; | ||
366 | } | 368 | } |
367 | 369 | ||
368 | static void | 370 | static void |
@@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net, | |||
649 | + nla_total_size(sizeof(u_int32_t)) /* gid */ | 651 | + nla_total_size(sizeof(u_int32_t)) /* gid */ |
650 | + nla_total_size(plen) /* prefix */ | 652 | + nla_total_size(plen) /* prefix */ |
651 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) | 653 | + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) |
652 | + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); | 654 | + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)) |
655 | + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */ | ||
653 | 656 | ||
654 | if (in && skb_mac_header_was_set(skb)) { | 657 | if (in && skb_mac_header_was_set(skb)) { |
655 | size += nla_total_size(skb->dev->hard_header_len) | 658 | size += nla_total_size(skb->dev->hard_header_len) |
@@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net, | |||
678 | break; | 681 | break; |
679 | 682 | ||
680 | case NFULNL_COPY_PACKET: | 683 | case NFULNL_COPY_PACKET: |
681 | if (inst->copy_range == 0 | 684 | if (inst->copy_range > skb->len) |
682 | || inst->copy_range > skb->len) | ||
683 | data_len = skb->len; | 685 | data_len = skb->len; |
684 | else | 686 | else |
685 | data_len = inst->copy_range; | 687 | data_len = inst->copy_range; |
@@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net, | |||
692 | goto unlock_and_release; | 694 | goto unlock_and_release; |
693 | } | 695 | } |
694 | 696 | ||
695 | if (inst->skb && | 697 | if (inst->skb && size > skb_tailroom(inst->skb)) { |
696 | size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { | ||
697 | /* either the queue len is too high or we don't have | 698 | /* either the queue len is too high or we don't have |
698 | * enough room in the skb left. flush to userspace. */ | 699 | * enough room in the skb left. flush to userspace. */ |
699 | __nfulnl_flush(inst); | 700 | __nfulnl_flush(inst); |
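
Note: two related size fixes land in the nfnetlink_log.c hunks above. First, nla_len is a 16-bit field that also covers the attribute header, so the largest payload a single attribute can carry is 0xFFFF - NLA_HDRLEN, and a copy_range of 0 now explicitly means "copy up to that maximum" instead of relying on the removed special case. Second, the size estimate reserves room for the nfgenmsg carried by the trailing NLMSG_DONE that __nfulnl_send() appends. An illustrative compile-time check of the first invariant (BUILD_BUG_ON must sit inside a function, e.g. an init path):

	BUILD_BUG_ON(NFULNL_COPY_RANGE_MAX + NLA_HDRLEN != 0xFFFF);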
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index a82077d9f59b..7c60ccd61a3e 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) | |||
665 | * returned by nf_queue. For instance, callers rely on -ECANCELED to | 665 | * returned by nf_queue. For instance, callers rely on -ECANCELED to |
666 | * mean 'ignore this hook'. | 666 | * mean 'ignore this hook'. |
667 | */ | 667 | */ |
668 | if (IS_ERR(segs)) | 668 | if (IS_ERR_OR_NULL(segs)) |
669 | goto out_err; | 669 | goto out_err; |
670 | queued = 0; | 670 | queued = 0; |
671 | err = 0; | 671 | err = 0; |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7e2683c8a44a..9d6d6f60a80f 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -19,9 +19,52 @@ | |||
19 | #include <linux/netfilter/x_tables.h> | 19 | #include <linux/netfilter/x_tables.h> |
20 | #include <linux/netfilter_ipv4/ip_tables.h> | 20 | #include <linux/netfilter_ipv4/ip_tables.h> |
21 | #include <linux/netfilter_ipv6/ip6_tables.h> | 21 | #include <linux/netfilter_ipv6/ip6_tables.h> |
22 | #include <asm/uaccess.h> /* for set_fs */ | ||
23 | #include <net/netfilter/nf_tables.h> | 22 | #include <net/netfilter/nf_tables.h> |
24 | 23 | ||
24 | static const struct { | ||
25 | const char *name; | ||
26 | u8 type; | ||
27 | } table_to_chaintype[] = { | ||
28 | { "filter", NFT_CHAIN_T_DEFAULT }, | ||
29 | { "raw", NFT_CHAIN_T_DEFAULT }, | ||
30 | { "security", NFT_CHAIN_T_DEFAULT }, | ||
31 | { "mangle", NFT_CHAIN_T_ROUTE }, | ||
32 | { "nat", NFT_CHAIN_T_NAT }, | ||
33 | { }, | ||
34 | }; | ||
35 | |||
36 | static int nft_compat_table_to_chaintype(const char *table) | ||
37 | { | ||
38 | int i; | ||
39 | |||
40 | for (i = 0; table_to_chaintype[i].name != NULL; i++) { | ||
41 | if (strcmp(table_to_chaintype[i].name, table) == 0) | ||
42 | return table_to_chaintype[i].type; | ||
43 | } | ||
44 | |||
45 | return -1; | ||
46 | } | ||
47 | |||
48 | static int nft_compat_chain_validate_dependency(const char *tablename, | ||
49 | const struct nft_chain *chain) | ||
50 | { | ||
51 | enum nft_chain_type type; | ||
52 | const struct nft_base_chain *basechain; | ||
53 | |||
54 | if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) | ||
55 | return 0; | ||
56 | |||
57 | type = nft_compat_table_to_chaintype(tablename); | ||
58 | if (type < 0) | ||
59 | return -EINVAL; | ||
60 | |||
61 | basechain = nft_base_chain(chain); | ||
62 | if (basechain->type->type != type) | ||
63 | return -EINVAL; | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | |||
25 | union nft_entry { | 68 | union nft_entry { |
26 | struct ipt_entry e4; | 69 | struct ipt_entry e4; |
27 | struct ip6t_entry e6; | 70 | struct ip6t_entry e6; |
@@ -95,6 +138,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
95 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 138 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
96 | 139 | ||
97 | par->hook_mask = 1 << ops->hooknum; | 140 | par->hook_mask = 1 << ops->hooknum; |
141 | } else { | ||
142 | par->hook_mask = 0; | ||
98 | } | 143 | } |
99 | par->family = ctx->afi->family; | 144 | par->family = ctx->afi->family; |
100 | } | 145 | } |
@@ -151,6 +196,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
151 | union nft_entry e = {}; | 196 | union nft_entry e = {}; |
152 | int ret; | 197 | int ret; |
153 | 198 | ||
199 | ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); | ||
200 | if (ret < 0) | ||
201 | goto err; | ||
202 | |||
154 | target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); | 203 | target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); |
155 | 204 | ||
156 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 205 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
@@ -216,6 +265,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, | |||
216 | { | 265 | { |
217 | struct xt_target *target = expr->ops->data; | 266 | struct xt_target *target = expr->ops->data; |
218 | unsigned int hook_mask = 0; | 267 | unsigned int hook_mask = 0; |
268 | int ret; | ||
219 | 269 | ||
220 | if (ctx->chain->flags & NFT_BASE_CHAIN) { | 270 | if (ctx->chain->flags & NFT_BASE_CHAIN) { |
221 | const struct nft_base_chain *basechain = | 271 | const struct nft_base_chain *basechain = |
@@ -223,11 +273,13 @@ static int nft_target_validate(const struct nft_ctx *ctx, | |||
223 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 273 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
224 | 274 | ||
225 | hook_mask = 1 << ops->hooknum; | 275 | hook_mask = 1 << ops->hooknum; |
226 | if (hook_mask & target->hooks) | 276 | if (!(hook_mask & target->hooks)) |
227 | return 0; | 277 | return -EINVAL; |
228 | 278 | ||
229 | /* This target is being called from an invalid chain */ | 279 | ret = nft_compat_chain_validate_dependency(target->table, |
230 | return -EINVAL; | 280 | ctx->chain); |
281 | if (ret < 0) | ||
282 | return ret; | ||
231 | } | 283 | } |
232 | return 0; | 284 | return 0; |
233 | } | 285 | } |
@@ -293,6 +345,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
293 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 345 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
294 | 346 | ||
295 | par->hook_mask = 1 << ops->hooknum; | 347 | par->hook_mask = 1 << ops->hooknum; |
348 | } else { | ||
349 | par->hook_mask = 0; | ||
296 | } | 350 | } |
297 | par->family = ctx->afi->family; | 351 | par->family = ctx->afi->family; |
298 | } | 352 | } |
@@ -320,6 +374,10 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
320 | union nft_entry e = {}; | 374 | union nft_entry e = {}; |
321 | int ret; | 375 | int ret; |
322 | 376 | ||
377 | ret = nft_compat_chain_validate_dependency(match->name, ctx->chain); | ||
378 | if (ret < 0) | ||
379 | goto err; | ||
380 | |||
323 | match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); | 381 | match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); |
324 | 382 | ||
325 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 383 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
@@ -379,6 +437,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
379 | { | 437 | { |
380 | struct xt_match *match = expr->ops->data; | 438 | struct xt_match *match = expr->ops->data; |
381 | unsigned int hook_mask = 0; | 439 | unsigned int hook_mask = 0; |
440 | int ret; | ||
382 | 441 | ||
383 | if (ctx->chain->flags & NFT_BASE_CHAIN) { | 442 | if (ctx->chain->flags & NFT_BASE_CHAIN) { |
384 | const struct nft_base_chain *basechain = | 443 | const struct nft_base_chain *basechain = |
@@ -386,11 +445,13 @@ static int nft_match_validate(const struct nft_ctx *ctx, | |||
386 | const struct nf_hook_ops *ops = &basechain->ops[0]; | 445 | const struct nf_hook_ops *ops = &basechain->ops[0]; |
387 | 446 | ||
388 | hook_mask = 1 << ops->hooknum; | 447 | hook_mask = 1 << ops->hooknum; |
389 | if (hook_mask & match->hooks) | 448 | if (!(hook_mask & match->hooks)) |
390 | return 0; | 449 | return -EINVAL; |
391 | 450 | ||
392 | /* This match is being called from an invalid chain */ | 451 | ret = nft_compat_chain_validate_dependency(match->name, |
393 | return -EINVAL; | 452 | ctx->chain); |
453 | if (ret < 0) | ||
454 | return ret; | ||
394 | } | 455 | } |
395 | return 0; | 456 | return 0; |
396 | } | 457 | } |
@@ -611,7 +672,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
611 | family = ctx->afi->family; | 672 | family = ctx->afi->family; |
612 | 673 | ||
613 | /* Re-use the existing target if it's already loaded. */ | 674 | /* Re-use the existing target if it's already loaded. */ |
614 | list_for_each_entry(nft_target, &nft_match_list, head) { | 675 | list_for_each_entry(nft_target, &nft_target_list, head) { |
615 | struct xt_target *target = nft_target->ops.data; | 676 | struct xt_target *target = nft_target->ops.data; |
616 | 677 | ||
617 | if (strcmp(target->name, tg_name) == 0 && | 678 | if (strcmp(target->name, tg_name) == 0 && |
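
The new nft_compat helpers above map an iptables table name to the nft chain type it may legally run in, via a sentinel-terminated table, and the lookup is applied at both expression init and rule validation time. A standalone sketch of the same lookup follows; note that the kernel hunk stores the -1 sentinel into an enum nft_chain_type before testing `type < 0`, which is only safe where that enum is signed, so the sketch keeps a plain int. The table values are illustrative.

    #include <stdio.h>
    #include <string.h>

    static const struct {
        const char *name;
        int type;
    } table_to_chaintype[] = {
        { "filter", 0 /* NFT_CHAIN_T_DEFAULT */ },
        { "mangle", 1 /* NFT_CHAIN_T_ROUTE   */ },
        { "nat",    2 /* NFT_CHAIN_T_NAT     */ },
        { NULL, 0 },                  /* sentinel terminates the scan */
    };

    static int chaintype_by_table(const char *table)
    {
        for (int i = 0; table_to_chaintype[i].name != NULL; i++)
            if (strcmp(table_to_chaintype[i].name, table) == 0)
                return table_to_chaintype[i].type;
        return -1;                    /* unknown table name */
    }

    int main(void)
    {
        printf("nat -> %d, bogus -> %d\n",
               chaintype_by_table("nat"), chaintype_by_table("bogus"));
        return 0;
    }
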
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 6637bab00567..d1ffd5eb3a9b 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c | |||
@@ -26,6 +26,11 @@ int nft_masq_init(const struct nft_ctx *ctx, | |||
26 | const struct nlattr * const tb[]) | 26 | const struct nlattr * const tb[]) |
27 | { | 27 | { |
28 | struct nft_masq *priv = nft_expr_priv(expr); | 28 | struct nft_masq *priv = nft_expr_priv(expr); |
29 | int err; | ||
30 | |||
31 | err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); | ||
32 | if (err < 0) | ||
33 | return err; | ||
29 | 34 | ||
30 | if (tb[NFTA_MASQ_FLAGS] == NULL) | 35 | if (tb[NFTA_MASQ_FLAGS] == NULL) |
31 | return 0; | 36 | return 0; |
@@ -55,5 +60,12 @@ nla_put_failure: | |||
55 | } | 60 | } |
56 | EXPORT_SYMBOL_GPL(nft_masq_dump); | 61 | EXPORT_SYMBOL_GPL(nft_masq_dump); |
57 | 62 | ||
63 | int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
64 | const struct nft_data **data) | ||
65 | { | ||
66 | return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); | ||
67 | } | ||
68 | EXPORT_SYMBOL_GPL(nft_masq_validate); | ||
69 | |||
58 | MODULE_LICENSE("GPL"); | 70 | MODULE_LICENSE("GPL"); |
59 | MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); | 71 | MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); |
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 799550b476fb..afe2b0b45ec4 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c | |||
@@ -95,7 +95,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
95 | u32 family; | 95 | u32 family; |
96 | int err; | 96 | int err; |
97 | 97 | ||
98 | if (tb[NFTA_NAT_TYPE] == NULL) | 98 | err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); |
99 | if (err < 0) | ||
100 | return err; | ||
101 | |||
102 | if (tb[NFTA_NAT_TYPE] == NULL || | ||
103 | (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && | ||
104 | tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) | ||
99 | return -EINVAL; | 105 | return -EINVAL; |
100 | 106 | ||
101 | switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { | 107 | switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { |
@@ -120,38 +126,44 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
120 | priv->family = family; | 126 | priv->family = family; |
121 | 127 | ||
122 | if (tb[NFTA_NAT_REG_ADDR_MIN]) { | 128 | if (tb[NFTA_NAT_REG_ADDR_MIN]) { |
123 | priv->sreg_addr_min = ntohl(nla_get_be32( | 129 | priv->sreg_addr_min = |
124 | tb[NFTA_NAT_REG_ADDR_MIN])); | 130 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN])); |
131 | |||
125 | err = nft_validate_input_register(priv->sreg_addr_min); | 132 | err = nft_validate_input_register(priv->sreg_addr_min); |
126 | if (err < 0) | 133 | if (err < 0) |
127 | return err; | 134 | return err; |
128 | } | ||
129 | 135 | ||
130 | if (tb[NFTA_NAT_REG_ADDR_MAX]) { | 136 | if (tb[NFTA_NAT_REG_ADDR_MAX]) { |
131 | priv->sreg_addr_max = ntohl(nla_get_be32( | 137 | priv->sreg_addr_max = |
132 | tb[NFTA_NAT_REG_ADDR_MAX])); | 138 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX])); |
133 | err = nft_validate_input_register(priv->sreg_addr_max); | 139 | |
134 | if (err < 0) | 140 | err = nft_validate_input_register(priv->sreg_addr_max); |
135 | return err; | 141 | if (err < 0) |
136 | } else | 142 | return err; |
137 | priv->sreg_addr_max = priv->sreg_addr_min; | 143 | } else { |
144 | priv->sreg_addr_max = priv->sreg_addr_min; | ||
145 | } | ||
146 | } | ||
138 | 147 | ||
139 | if (tb[NFTA_NAT_REG_PROTO_MIN]) { | 148 | if (tb[NFTA_NAT_REG_PROTO_MIN]) { |
140 | priv->sreg_proto_min = ntohl(nla_get_be32( | 149 | priv->sreg_proto_min = |
141 | tb[NFTA_NAT_REG_PROTO_MIN])); | 150 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN])); |
151 | |||
142 | err = nft_validate_input_register(priv->sreg_proto_min); | 152 | err = nft_validate_input_register(priv->sreg_proto_min); |
143 | if (err < 0) | 153 | if (err < 0) |
144 | return err; | 154 | return err; |
145 | } | ||
146 | 155 | ||
147 | if (tb[NFTA_NAT_REG_PROTO_MAX]) { | 156 | if (tb[NFTA_NAT_REG_PROTO_MAX]) { |
148 | priv->sreg_proto_max = ntohl(nla_get_be32( | 157 | priv->sreg_proto_max = |
149 | tb[NFTA_NAT_REG_PROTO_MAX])); | 158 | ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX])); |
150 | err = nft_validate_input_register(priv->sreg_proto_max); | 159 | |
151 | if (err < 0) | 160 | err = nft_validate_input_register(priv->sreg_proto_max); |
152 | return err; | 161 | if (err < 0) |
153 | } else | 162 | return err; |
154 | priv->sreg_proto_max = priv->sreg_proto_min; | 163 | } else { |
164 | priv->sreg_proto_max = priv->sreg_proto_min; | ||
165 | } | ||
166 | } | ||
155 | 167 | ||
156 | if (tb[NFTA_NAT_FLAGS]) { | 168 | if (tb[NFTA_NAT_FLAGS]) { |
157 | priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); | 169 | priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); |
@@ -179,17 +191,19 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
179 | 191 | ||
180 | if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) | 192 | if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) |
181 | goto nla_put_failure; | 193 | goto nla_put_failure; |
182 | if (nla_put_be32(skb, | 194 | |
183 | NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min))) | 195 | if (priv->sreg_addr_min) { |
184 | goto nla_put_failure; | 196 | if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN, |
185 | if (nla_put_be32(skb, | 197 | htonl(priv->sreg_addr_min)) || |
186 | NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) | 198 | nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX, |
187 | goto nla_put_failure; | 199 | htonl(priv->sreg_addr_max))) |
200 | goto nla_put_failure; | ||
201 | } | ||
202 | |||
188 | if (priv->sreg_proto_min) { | 203 | if (priv->sreg_proto_min) { |
189 | if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, | 204 | if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, |
190 | htonl(priv->sreg_proto_min))) | 205 | htonl(priv->sreg_proto_min)) || |
191 | goto nla_put_failure; | 206 | nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, |
192 | if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, | ||
193 | htonl(priv->sreg_proto_max))) | 207 | htonl(priv->sreg_proto_max))) |
194 | goto nla_put_failure; | 208 | goto nla_put_failure; |
195 | } | 209 | } |
@@ -205,6 +219,13 @@ nla_put_failure: | |||
205 | return -1; | 219 | return -1; |
206 | } | 220 | } |
207 | 221 | ||
222 | static int nft_nat_validate(const struct nft_ctx *ctx, | ||
223 | const struct nft_expr *expr, | ||
224 | const struct nft_data **data) | ||
225 | { | ||
226 | return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); | ||
227 | } | ||
228 | |||
208 | static struct nft_expr_type nft_nat_type; | 229 | static struct nft_expr_type nft_nat_type; |
209 | static const struct nft_expr_ops nft_nat_ops = { | 230 | static const struct nft_expr_ops nft_nat_ops = { |
210 | .type = &nft_nat_type, | 231 | .type = &nft_nat_type, |
@@ -212,6 +233,7 @@ static const struct nft_expr_ops nft_nat_ops = { | |||
212 | .eval = nft_nat_eval, | 233 | .eval = nft_nat_eval, |
213 | .init = nft_nat_init, | 234 | .init = nft_nat_init, |
214 | .dump = nft_nat_dump, | 235 | .dump = nft_nat_dump, |
236 | .validate = nft_nat_validate, | ||
215 | }; | 237 | }; |
216 | 238 | ||
217 | static struct nft_expr_type nft_nat_type __read_mostly = { | 239 | static struct nft_expr_type nft_nat_type __read_mostly = { |
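
Beyond the restyled indentation, the reworked nft_nat_init() rejects a request that supplies neither an address nor a proto range register, nests each MAX attribute read under its MIN counterpart, and lets a missing MAX default to MIN; nft_nat_dump() mirrors this by emitting each register pair only when the MIN side is set. A small sketch of the pairing rule, with attribute presence modelled as pointers:

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint32_t min, max; };

    /* Returns 1 if a range was supplied, 0 if absent. */
    static int parse_range(const uint32_t *attr_min, const uint32_t *attr_max,
                           struct range *r)
    {
        if (!attr_min)
            return 0;                           /* MAX alone is never read */
        r->min = *attr_min;
        r->max = attr_max ? *attr_max : r->min; /* default: single value */
        return 1;
    }

    int main(void)
    {
        uint32_t min = 10;
        struct range r = { 0, 0 };

        if (parse_range(&min, NULL, &r))
            printf("range %u..%u\n", r.min, r.max); /* prints 10..10 */
        return 0;
    }
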
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7a186e74b1b3..0007b8180397 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); | |||
96 | static int netlink_dump(struct sock *sk); | 96 | static int netlink_dump(struct sock *sk); |
97 | static void netlink_skb_destructor(struct sk_buff *skb); | 97 | static void netlink_skb_destructor(struct sk_buff *skb); |
98 | 98 | ||
99 | /* nl_table locking explained: | ||
100 | * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock | ||
101 | * combined with an RCU read-side lock. Insertion and removal are protected | ||
102 | * with nl_sk_hash_lock while using RCU list modification primitives and may | ||
103 | * run in parallel to nl_table_lock protected lookups. Destruction of the | ||
104 | * Netlink socket may only occur *after* nl_table_lock has been acquired | ||
105 | * either during or after the socket has been removed from the list. | ||
106 | */ | ||
99 | DEFINE_RWLOCK(nl_table_lock); | 107 | DEFINE_RWLOCK(nl_table_lock); |
100 | EXPORT_SYMBOL_GPL(nl_table_lock); | 108 | EXPORT_SYMBOL_GPL(nl_table_lock); |
101 | static atomic_t nl_table_users = ATOMIC_INIT(0); | 109 | static atomic_t nl_table_users = ATOMIC_INIT(0); |
@@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock); | |||
109 | static int lockdep_nl_sk_hash_is_held(void) | 117 | static int lockdep_nl_sk_hash_is_held(void) |
110 | { | 118 | { |
111 | #ifdef CONFIG_LOCKDEP | 119 | #ifdef CONFIG_LOCKDEP |
112 | return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1; | 120 | if (debug_locks) |
113 | #else | 121 | return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); |
114 | return 1; | ||
115 | #endif | 122 | #endif |
123 | return 1; | ||
116 | } | 124 | } |
117 | 125 | ||
118 | static ATOMIC_NOTIFIER_HEAD(netlink_chain); | 126 | static ATOMIC_NOTIFIER_HEAD(netlink_chain); |
@@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) | |||
1028 | struct netlink_table *table = &nl_table[protocol]; | 1036 | struct netlink_table *table = &nl_table[protocol]; |
1029 | struct sock *sk; | 1037 | struct sock *sk; |
1030 | 1038 | ||
1039 | read_lock(&nl_table_lock); | ||
1031 | rcu_read_lock(); | 1040 | rcu_read_lock(); |
1032 | sk = __netlink_lookup(table, portid, net); | 1041 | sk = __netlink_lookup(table, portid, net); |
1033 | if (sk) | 1042 | if (sk) |
1034 | sock_hold(sk); | 1043 | sock_hold(sk); |
1035 | rcu_read_unlock(); | 1044 | rcu_read_unlock(); |
1045 | read_unlock(&nl_table_lock); | ||
1036 | 1046 | ||
1037 | return sk; | 1047 | return sk; |
1038 | } | 1048 | } |
@@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock) | |||
1257 | } | 1267 | } |
1258 | netlink_table_ungrab(); | 1268 | netlink_table_ungrab(); |
1259 | 1269 | ||
1260 | /* Wait for readers to complete */ | ||
1261 | synchronize_net(); | ||
1262 | |||
1263 | kfree(nlk->groups); | 1270 | kfree(nlk->groups); |
1264 | nlk->groups = NULL; | 1271 | nlk->groups = NULL; |
1265 | 1272 | ||
@@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock) | |||
1281 | 1288 | ||
1282 | retry: | 1289 | retry: |
1283 | cond_resched(); | 1290 | cond_resched(); |
1291 | netlink_table_grab(); | ||
1284 | rcu_read_lock(); | 1292 | rcu_read_lock(); |
1285 | if (__netlink_lookup(table, portid, net)) { | 1293 | if (__netlink_lookup(table, portid, net)) { |
1286 | /* Bind collision, search negative portid values. */ | 1294 | /* Bind collision, search negative portid values. */ |
@@ -1288,9 +1296,11 @@ retry: | |||
1288 | if (rover > -4097) | 1296 | if (rover > -4097) |
1289 | rover = -4097; | 1297 | rover = -4097; |
1290 | rcu_read_unlock(); | 1298 | rcu_read_unlock(); |
1299 | netlink_table_ungrab(); | ||
1291 | goto retry; | 1300 | goto retry; |
1292 | } | 1301 | } |
1293 | rcu_read_unlock(); | 1302 | rcu_read_unlock(); |
1303 | netlink_table_ungrab(); | ||
1294 | 1304 | ||
1295 | err = netlink_insert(sk, net, portid); | 1305 | err = netlink_insert(sk, net, portid); |
1296 | if (err == -EADDRINUSE) | 1306 | if (err == -EADDRINUSE) |
@@ -1430,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups, | |||
1430 | return; | 1440 | return; |
1431 | 1441 | ||
1432 | for (undo = 0; undo < group; undo++) | 1442 | for (undo = 0; undo < group; undo++) |
1433 | if (test_bit(group, &groups)) | 1443 | if (test_bit(undo, &groups)) |
1434 | nlk->netlink_unbind(undo); | 1444 | nlk->netlink_unbind(undo); |
1435 | } | 1445 | } |
1436 | 1446 | ||
@@ -1482,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | |||
1482 | netlink_insert(sk, net, nladdr->nl_pid) : | 1492 | netlink_insert(sk, net, nladdr->nl_pid) : |
1483 | netlink_autobind(sock); | 1493 | netlink_autobind(sock); |
1484 | if (err) { | 1494 | if (err) { |
1485 | netlink_unbind(nlk->ngroups - 1, groups, nlk); | 1495 | netlink_unbind(nlk->ngroups, groups, nlk); |
1486 | return err; | 1496 | return err; |
1487 | } | 1497 | } |
1488 | } | 1498 | } |
@@ -2499,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, | |||
2499 | nl_table[unit].module = module; | 2509 | nl_table[unit].module = module; |
2500 | if (cfg) { | 2510 | if (cfg) { |
2501 | nl_table[unit].bind = cfg->bind; | 2511 | nl_table[unit].bind = cfg->bind; |
2512 | nl_table[unit].unbind = cfg->unbind; | ||
2502 | nl_table[unit].flags = cfg->flags; | 2513 | nl_table[unit].flags = cfg->flags; |
2503 | if (cfg->compare) | 2514 | if (cfg->compare) |
2504 | nl_table[unit].compare = cfg->compare; | 2515 | nl_table[unit].compare = cfg->compare; |
@@ -2921,14 +2932,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) | |||
2921 | } | 2932 | } |
2922 | 2933 | ||
2923 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) | 2934 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) |
2924 | __acquires(RCU) | 2935 | __acquires(nl_table_lock) __acquires(RCU) |
2925 | { | 2936 | { |
2937 | read_lock(&nl_table_lock); | ||
2926 | rcu_read_lock(); | 2938 | rcu_read_lock(); |
2927 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2939 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
2928 | } | 2940 | } |
2929 | 2941 | ||
2930 | static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2942 | static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
2931 | { | 2943 | { |
2944 | struct rhashtable *ht; | ||
2932 | struct netlink_sock *nlk; | 2945 | struct netlink_sock *nlk; |
2933 | struct nl_seq_iter *iter; | 2946 | struct nl_seq_iter *iter; |
2934 | struct net *net; | 2947 | struct net *net; |
@@ -2943,19 +2956,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2943 | iter = seq->private; | 2956 | iter = seq->private; |
2944 | nlk = v; | 2957 | nlk = v; |
2945 | 2958 | ||
2946 | rht_for_each_entry_rcu(nlk, nlk->node.next, node) | 2959 | i = iter->link; |
2960 | ht = &nl_table[i].hash; | ||
2961 | rht_for_each_entry(nlk, nlk->node.next, ht, node) | ||
2947 | if (net_eq(sock_net((struct sock *)nlk), net)) | 2962 | if (net_eq(sock_net((struct sock *)nlk), net)) |
2948 | return nlk; | 2963 | return nlk; |
2949 | 2964 | ||
2950 | i = iter->link; | ||
2951 | j = iter->hash_idx + 1; | 2965 | j = iter->hash_idx + 1; |
2952 | 2966 | ||
2953 | do { | 2967 | do { |
2954 | struct rhashtable *ht = &nl_table[i].hash; | ||
2955 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | 2968 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); |
2956 | 2969 | ||
2957 | for (; j < tbl->size; j++) { | 2970 | for (; j < tbl->size; j++) { |
2958 | rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) { | 2971 | rht_for_each_entry(nlk, tbl->buckets[j], ht, node) { |
2959 | if (net_eq(sock_net((struct sock *)nlk), net)) { | 2972 | if (net_eq(sock_net((struct sock *)nlk), net)) { |
2960 | iter->link = i; | 2973 | iter->link = i; |
2961 | iter->hash_idx = j; | 2974 | iter->hash_idx = j; |
@@ -2971,9 +2984,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2971 | } | 2984 | } |
2972 | 2985 | ||
2973 | static void netlink_seq_stop(struct seq_file *seq, void *v) | 2986 | static void netlink_seq_stop(struct seq_file *seq, void *v) |
2974 | __releases(RCU) | 2987 | __releases(RCU) __releases(nl_table_lock) |
2975 | { | 2988 | { |
2976 | rcu_read_unlock(); | 2989 | rcu_read_unlock(); |
2990 | read_unlock(&nl_table_lock); | ||
2977 | } | 2991 | } |
2978 | 2992 | ||
2979 | 2993 | ||
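
The af_netlink changes above trade the pure-RCU lookup (and the synchronize_net() in release) for nl_table_lock held around lookups, per the new locking comment: readers take the read side plus RCU, mutators serialize on nl_sk_hash_lock, and socket teardown may proceed once the write side has been taken. A much-simplified userspace model of that division of labor, with illustrative names and none of the RCU machinery:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t  hash_lock  = PTHREAD_MUTEX_INITIALIZER;
    static int table_entry = -1;              /* -1 means "no socket bound" */

    static int lookup(void)
    {
        pthread_rwlock_rdlock(&table_lock);   /* readers run in parallel */
        int v = table_entry;
        pthread_rwlock_unlock(&table_lock);
        return v;
    }

    static void insert(int v)
    {
        pthread_mutex_lock(&hash_lock);       /* mutators serialize here */
        table_entry = v;
        pthread_mutex_unlock(&hash_lock);
    }

    static void destroy(void)
    {
        pthread_rwlock_wrlock(&table_lock);   /* waits out every reader... */
        table_entry = -1;
        pthread_rwlock_unlock(&table_lock);   /* ...then freeing is safe */
    }

    int main(void)
    {
        insert(42);
        printf("lookup: %d\n", lookup());
        destroy();
        return 0;
    }
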
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2e31d9e7f4dc..e6d7255183eb 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -324,6 +324,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, | |||
324 | segs = __skb_gso_segment(skb, NETIF_F_SG, false); | 324 | segs = __skb_gso_segment(skb, NETIF_F_SG, false); |
325 | if (IS_ERR(segs)) | 325 | if (IS_ERR(segs)) |
326 | return PTR_ERR(segs); | 326 | return PTR_ERR(segs); |
327 | if (segs == NULL) | ||
328 | return -EINVAL; | ||
327 | 329 | ||
328 | /* Queue all of the segments. */ | 330 | /* Queue all of the segments. */ |
329 | skb = segs; | 331 | skb = segs; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2cf61b3e633c..76f402e05bd6 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -947,7 +947,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | |||
947 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { | 947 | if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { |
948 | if (qdisc_is_percpu_stats(sch)) { | 948 | if (qdisc_is_percpu_stats(sch)) { |
949 | sch->cpu_bstats = | 949 | sch->cpu_bstats = |
950 | alloc_percpu(struct gnet_stats_basic_cpu); | 950 | netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); |
951 | if (!sch->cpu_bstats) | 951 | if (!sch->cpu_bstats) |
952 | goto err_out4; | 952 | goto err_out4; |
953 | 953 | ||
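
The sch_api one-liner swaps alloc_percpu() for netdev_alloc_pcpu_stats(), which, to the best of my reading, additionally initializes the u64_stats syncp embedded in each per-CPU slot; that seqcount must be initialized before use on 32-bit kernels. A userspace analog of why a plain zeroing allocation is not enough when each slot embeds a synchronization object (names are illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NCPU 4

    struct basic_cpu_stats {
        uint64_t bytes, packets;
        pthread_mutex_t syncp;          /* stand-in for the u64_stats syncp */
    };

    static struct basic_cpu_stats *alloc_pcpu_stats(void)
    {
        struct basic_cpu_stats *s = calloc(NCPU, sizeof(*s));
        if (!s)
            return NULL;
        for (int cpu = 0; cpu < NCPU; cpu++)
            pthread_mutex_init(&s[cpu].syncp, NULL); /* the step calloc skips */
        return s;
    }

    int main(void)
    {
        struct basic_cpu_stats *s = alloc_pcpu_stats();
        if (!s)
            return 1;
        pthread_mutex_lock(&s[0].syncp);
        s[0].packets++;
        pthread_mutex_unlock(&s[0].syncp);
        printf("cpu0 packets: %llu\n", (unsigned long long)s[0].packets);
        free(s);
        return 0;
    }
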
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 33d7a98a7a97..b783a446d884 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c | |||
@@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) | |||
445 | sch->limit = q->params.limit; | 445 | sch->limit = q->params.limit; |
446 | 446 | ||
447 | setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); | 447 | setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); |
448 | mod_timer(&q->adapt_timer, jiffies + HZ / 2); | ||
449 | 448 | ||
450 | if (opt) { | 449 | if (opt) { |
451 | int err = pie_change(sch, opt); | 450 | int err = pie_change(sch, opt); |
@@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) | |||
454 | return err; | 453 | return err; |
455 | } | 454 | } |
456 | 455 | ||
456 | mod_timer(&q->adapt_timer, jiffies + HZ / 2); | ||
457 | return 0; | 457 | return 0; |
458 | } | 458 | } |
459 | 459 | ||
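
The sch_pie hunk moves mod_timer() below the fallible pie_change() step, so the adaptation timer is armed only after initialization has fully succeeded and the error path never has to contend with a live timer. The pattern in miniature:

    /* Arm anything asynchronous last, after every fallible step. */
    #include <stdio.h>

    static int configure(int opt) { return opt < 0 ? -1 : 0; }
    static void arm_timer(void)   { puts("timer armed"); }

    static int init(int opt)
    {
        if (configure(opt) < 0)
            return -1;   /* nothing asynchronous to tear down */
        arm_timer();     /* final step: the object is now complete */
        return 0;
    }

    int main(void)
    {
        printf("init(-1) = %d\n", init(-1));
        printf("init(1)  = %d\n", init(1));
        return 0;
    }
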
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 0e8529113dc5..fb7976aee61c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, | |||
862 | list_add(&cur_key->key_list, sh_keys); | 862 | list_add(&cur_key->key_list, sh_keys); |
863 | 863 | ||
864 | cur_key->key = key; | 864 | cur_key->key = key; |
865 | sctp_auth_key_hold(key); | ||
866 | |||
867 | return 0; | 865 | return 0; |
868 | nomem: | 866 | nomem: |
869 | if (!replace) | 867 | if (!replace) |
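
Dropping sctp_auth_key_hold() means the shared-keys list adopts the caller's reference instead of taking a second one, which reads as a reference-count leak fix. The failure mode, reduced to a toy counter:

    #include <stdio.h>

    struct key { int refcnt; };

    static void key_put(struct key *k)
    {
        if (--k->refcnt == 0)
            puts("key freed");
    }

    int main(void)
    {
        struct key k = { .refcnt = 1 };  /* caller's reference */

        /* The store adopts the caller's reference; an extra hold here
         * (the removed sctp_auth_key_hold()) would leave refcnt at 2
         * and the key would never be freed. */
        key_put(&k);                     /* store drops it -> "key freed" */
        return 0;
    }
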
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ab734be8cb20..9f32741abb1c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2609,6 +2609,9 @@ do_addr_param: | |||
2609 | addr_param = param.v + sizeof(sctp_addip_param_t); | 2609 | addr_param = param.v + sizeof(sctp_addip_param_t); |
2610 | 2610 | ||
2611 | af = sctp_get_af_specific(param_type2af(param.p->type)); | 2611 | af = sctp_get_af_specific(param_type2af(param.p->type)); |
2612 | if (af == NULL) | ||
2613 | break; | ||
2614 | |||
2612 | af->from_addr_param(&addr, addr_param, | 2615 | af->from_addr_param(&addr, addr_param, |
2613 | htons(asoc->peer.port), 0); | 2616 | htons(asoc->peer.port), 0); |
2614 | 2617 | ||
diff --git a/net/tipc/node.c b/net/tipc/node.c index 90cee4a6fce4..5781634e957d 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -219,11 +219,11 @@ void tipc_node_abort_sock_conns(struct list_head *conns) | |||
219 | void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 219 | void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
220 | { | 220 | { |
221 | struct tipc_link **active = &n_ptr->active_links[0]; | 221 | struct tipc_link **active = &n_ptr->active_links[0]; |
222 | u32 addr = n_ptr->addr; | ||
223 | 222 | ||
224 | n_ptr->working_links++; | 223 | n_ptr->working_links++; |
225 | tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, | 224 | n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; |
226 | l_ptr->bearer_id, addr); | 225 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; |
226 | |||
227 | pr_info("Established link <%s> on network plane %c\n", | 227 | pr_info("Established link <%s> on network plane %c\n", |
228 | l_ptr->name, l_ptr->net_plane); | 228 | l_ptr->name, l_ptr->net_plane); |
229 | 229 | ||
@@ -284,10 +284,10 @@ static void node_select_active_links(struct tipc_node *n_ptr) | |||
284 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 284 | void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
285 | { | 285 | { |
286 | struct tipc_link **active; | 286 | struct tipc_link **active; |
287 | u32 addr = n_ptr->addr; | ||
288 | 287 | ||
289 | n_ptr->working_links--; | 288 | n_ptr->working_links--; |
290 | tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr); | 289 | n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN; |
290 | n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; | ||
291 | 291 | ||
292 | if (!tipc_link_is_active(l_ptr)) { | 292 | if (!tipc_link_is_active(l_ptr)) { |
293 | pr_info("Lost standby link <%s> on network plane %c\n", | 293 | pr_info("Lost standby link <%s> on network plane %c\n", |
@@ -552,28 +552,30 @@ void tipc_node_unlock(struct tipc_node *node) | |||
552 | LIST_HEAD(conn_sks); | 552 | LIST_HEAD(conn_sks); |
553 | struct sk_buff_head waiting_sks; | 553 | struct sk_buff_head waiting_sks; |
554 | u32 addr = 0; | 554 | u32 addr = 0; |
555 | unsigned int flags = node->action_flags; | 555 | int flags = node->action_flags; |
556 | u32 link_id = 0; | ||
556 | 557 | ||
557 | if (likely(!node->action_flags)) { | 558 | if (likely(!flags)) { |
558 | spin_unlock_bh(&node->lock); | 559 | spin_unlock_bh(&node->lock); |
559 | return; | 560 | return; |
560 | } | 561 | } |
561 | 562 | ||
563 | addr = node->addr; | ||
564 | link_id = node->link_id; | ||
562 | __skb_queue_head_init(&waiting_sks); | 565 | __skb_queue_head_init(&waiting_sks); |
563 | if (node->action_flags & TIPC_WAKEUP_USERS) { | 566 | |
567 | if (flags & TIPC_WAKEUP_USERS) | ||
564 | skb_queue_splice_init(&node->waiting_sks, &waiting_sks); | 568 | skb_queue_splice_init(&node->waiting_sks, &waiting_sks); |
565 | node->action_flags &= ~TIPC_WAKEUP_USERS; | 569 | |
566 | } | 570 | if (flags & TIPC_NOTIFY_NODE_DOWN) { |
567 | if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { | ||
568 | list_replace_init(&node->nsub, &nsub_list); | 571 | list_replace_init(&node->nsub, &nsub_list); |
569 | list_replace_init(&node->conn_sks, &conn_sks); | 572 | list_replace_init(&node->conn_sks, &conn_sks); |
570 | node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; | ||
571 | } | 573 | } |
572 | if (node->action_flags & TIPC_NOTIFY_NODE_UP) { | 574 | node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | |
573 | node->action_flags &= ~TIPC_NOTIFY_NODE_UP; | 575 | TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP | |
574 | addr = node->addr; | 576 | TIPC_NOTIFY_LINK_DOWN | |
575 | } | 577 | TIPC_WAKEUP_BCAST_USERS); |
576 | node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS; | 578 | |
577 | spin_unlock_bh(&node->lock); | 579 | spin_unlock_bh(&node->lock); |
578 | 580 | ||
579 | while (!skb_queue_empty(&waiting_sks)) | 581 | while (!skb_queue_empty(&waiting_sks)) |
@@ -588,6 +590,14 @@ void tipc_node_unlock(struct tipc_node *node) | |||
588 | if (flags & TIPC_WAKEUP_BCAST_USERS) | 590 | if (flags & TIPC_WAKEUP_BCAST_USERS) |
589 | tipc_bclink_wakeup_users(); | 591 | tipc_bclink_wakeup_users(); |
590 | 592 | ||
591 | if (addr) | 593 | if (flags & TIPC_NOTIFY_NODE_UP) |
592 | tipc_named_node_up(addr); | 594 | tipc_named_node_up(addr); |
595 | |||
596 | if (flags & TIPC_NOTIFY_LINK_UP) | ||
597 | tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, | ||
598 | TIPC_NODE_SCOPE, link_id, addr); | ||
599 | |||
600 | if (flags & TIPC_NOTIFY_LINK_DOWN) | ||
601 | tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, | ||
602 | link_id, addr); | ||
593 | } | 603 | } |
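
The tipc_node changes convert the link up/down name-table calls from inline, under the node lock, to deferred work keyed off the new TIPC_NOTIFY_LINK_UP/DOWN flags: the flags and their payload (addr and the packed link_id) are snapshotted and cleared while locked, and the publish/withdraw callouts run only after the unlock. The skeleton of that pattern, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    #define NOTIFY_LINK_UP   (1 << 0)
    #define NOTIFY_LINK_DOWN (1 << 1)

    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int action_flags;
    static unsigned int link_id;

    static void node_unlock(void)
    {
        pthread_mutex_lock(&node_lock);
        unsigned int flags = action_flags;  /* snapshot the flags... */
        unsigned int id = link_id;          /* ...and their payload */
        action_flags = 0;
        pthread_mutex_unlock(&node_lock);

        if (flags & NOTIFY_LINK_UP)         /* callouts run unlocked */
            printf("publish link 0x%x\n", id);
        if (flags & NOTIFY_LINK_DOWN)
            printf("withdraw link 0x%x\n", id);
    }

    int main(void)
    {
        action_flags = NOTIFY_LINK_UP;
        link_id = (5 << 16) | 2;   /* peer bearer 5, local bearer 2 */
        node_unlock();
        return 0;
    }
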
diff --git a/net/tipc/node.h b/net/tipc/node.h index 67513c3c852c..04e91458bb29 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h | |||
@@ -53,6 +53,7 @@ | |||
53 | * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down | 53 | * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down |
54 | * TIPC_NOTIFY_NODE_DOWN: notify node is down | 54 | * TIPC_NOTIFY_NODE_DOWN: notify node is down |
55 | * TIPC_NOTIFY_NODE_UP: notify node is up | 55 | * TIPC_NOTIFY_NODE_UP: notify node is up |
56 | * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type | ||
56 | */ | 57 | */ |
57 | enum { | 58 | enum { |
58 | TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), | 59 | TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), |
@@ -60,7 +61,9 @@ enum { | |||
60 | TIPC_NOTIFY_NODE_DOWN = (1 << 3), | 61 | TIPC_NOTIFY_NODE_DOWN = (1 << 3), |
61 | TIPC_NOTIFY_NODE_UP = (1 << 4), | 62 | TIPC_NOTIFY_NODE_UP = (1 << 4), |
62 | TIPC_WAKEUP_USERS = (1 << 5), | 63 | TIPC_WAKEUP_USERS = (1 << 5), |
63 | TIPC_WAKEUP_BCAST_USERS = (1 << 6) | 64 | TIPC_WAKEUP_BCAST_USERS = (1 << 6), |
65 | TIPC_NOTIFY_LINK_UP = (1 << 7), | ||
66 | TIPC_NOTIFY_LINK_DOWN = (1 << 8) | ||
64 | }; | 67 | }; |
65 | 68 | ||
66 | /** | 69 | /** |
@@ -100,6 +103,7 @@ struct tipc_node_bclink { | |||
100 | * @working_links: number of working links to node (both active and standby) | 103 | * @working_links: number of working links to node (both active and standby) |
101 | * @link_cnt: number of links to node | 104 | * @link_cnt: number of links to node |
102 | * @signature: node instance identifier | 105 | * @signature: node instance identifier |
106 | * @link_id: local and remote bearer ids of changing link, if any | ||
103 | * @nsub: list of "node down" subscriptions monitoring node | 107 | * @nsub: list of "node down" subscriptions monitoring node |
104 | * @rcu: rcu struct for tipc_node | 108 | * @rcu: rcu struct for tipc_node |
105 | */ | 109 | */ |
@@ -116,6 +120,7 @@ struct tipc_node { | |||
116 | int link_cnt; | 120 | int link_cnt; |
117 | int working_links; | 121 | int working_links; |
118 | u32 signature; | 122 | u32 signature; |
123 | u32 link_id; | ||
119 | struct list_head nsub; | 124 | struct list_head nsub; |
120 | struct sk_buff_head waiting_sks; | 125 | struct sk_buff_head waiting_sks; |
121 | struct list_head conn_sks; | 126 | struct list_head conn_sks; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 75275c5cf929..51bddc236a15 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1776,7 +1776,7 @@ int tipc_sk_rcv(struct sk_buff *buf) | |||
1776 | sk = &tsk->sk; | 1776 | sk = &tsk->sk; |
1777 | 1777 | ||
1778 | /* Queue message */ | 1778 | /* Queue message */ |
1779 | bh_lock_sock(sk); | 1779 | spin_lock_bh(&sk->sk_lock.slock); |
1780 | 1780 | ||
1781 | if (!sock_owned_by_user(sk)) { | 1781 | if (!sock_owned_by_user(sk)) { |
1782 | rc = filter_rcv(sk, buf); | 1782 | rc = filter_rcv(sk, buf); |
@@ -1787,7 +1787,7 @@ int tipc_sk_rcv(struct sk_buff *buf) | |||
1787 | if (sk_add_backlog(sk, buf, limit)) | 1787 | if (sk_add_backlog(sk, buf, limit)) |
1788 | rc = -TIPC_ERR_OVERLOAD; | 1788 | rc = -TIPC_ERR_OVERLOAD; |
1789 | } | 1789 | } |
1790 | bh_unlock_sock(sk); | 1790 | spin_unlock_bh(&sk->sk_lock.slock); |
1791 | tipc_sk_put(tsk); | 1791 | tipc_sk_put(tsk); |
1792 | if (likely(!rc)) | 1792 | if (likely(!rc)) |
1793 | return 0; | 1793 | return 0; |
@@ -2673,7 +2673,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) | |||
2673 | case SIOCGETLINKNAME: | 2673 | case SIOCGETLINKNAME: |
2674 | if (copy_from_user(&lnr, argp, sizeof(lnr))) | 2674 | if (copy_from_user(&lnr, argp, sizeof(lnr))) |
2675 | return -EFAULT; | 2675 | return -EFAULT; |
2676 | if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer, | 2676 | if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, |
2677 | lnr.linkname, TIPC_MAX_LINK_NAME)) { | 2677 | lnr.linkname, TIPC_MAX_LINK_NAME)) { |
2678 | if (copy_to_user(argp, &lnr, sizeof(lnr))) | 2678 | if (copy_to_user(argp, &lnr, sizeof(lnr))) |
2679 | return -EFAULT; | 2679 | return -EFAULT; |
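
Two related details in the tipc/socket.c hunk: tipc_sk_rcv() switches from bh_lock_sock(), a plain spin_lock on sk_lock.slock, to spin_lock_bh(), presumably because the path can now be entered from contexts where bottom halves must be disabled explicitly; and SIOCGETLINKNAME masks bearer_id with 0xffff because the link id now packs the peer bearer into the upper half, as set in tipc_node_link_up(). The packing arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* link_id = peer_bearer_id << 16 | bearer_id, per tipc_node_link_up() */
    static uint32_t pack_link_id(uint32_t peer_bearer, uint32_t local_bearer)
    {
        return peer_bearer << 16 | local_bearer;
    }

    int main(void)
    {
        uint32_t id = pack_link_id(5, 2);

        printf("link_id=0x%08x local=%u peer=%u\n",
               id, id & 0xffff, id >> 16);
        return 0;
    }
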
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 499d6c18a8ce..7c532856b398 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb) | |||
157 | kfree_skb(skb); | 157 | kfree_skb(skb); |
158 | if (IS_ERR(segs)) | 158 | if (IS_ERR(segs)) |
159 | return PTR_ERR(segs); | 159 | return PTR_ERR(segs); |
160 | if (segs == NULL) | ||
161 | return -EINVAL; | ||
160 | 162 | ||
161 | do { | 163 | do { |
162 | struct sk_buff *nskb = segs->next; | 164 | struct sk_buff *nskb = segs->next; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4c4e457e7888..88bf289abdc9 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) | |||
1962 | struct xfrm_policy *pol = xdst->pols[0]; | 1962 | struct xfrm_policy *pol = xdst->pols[0]; |
1963 | struct xfrm_policy_queue *pq = &pol->polq; | 1963 | struct xfrm_policy_queue *pq = &pol->polq; |
1964 | 1964 | ||
1965 | if (unlikely(skb_fclone_busy(skb))) { | 1965 | if (unlikely(skb_fclone_busy(sk, skb))) { |
1966 | kfree_skb(skb); | 1966 | kfree_skb(skb); |
1967 | return 0; | 1967 | return 0; |
1968 | } | 1968 | } |