author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 12:31:37 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 12:31:37 -0400
commit		2e923b0251932ad4a82cc87ec1443a1f1d17073e (patch)
tree		d12032bc9bcfbb8a57659275d1b9b582f23f2ecc /net
parent		ffd8221bc348f8c282d1271883dbe629ea8ae289 (diff)
parent		f2d9da1a8375cbe53df5b415d059429013a3a79f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Include fixes for netrom and dsa (Fabian Frederick and Florian
Fainelli)
2) Fix FIXED_PHY support in stmmac, from Giuseppe CAVALLARO.
3) Several SKB use-after-free fixes (vxlan, openvswitch, ip_tunnel,
fou), from Li RongQing (a short illustrative sketch of the common
pattern follows this list).
4) fec driver PTP support fixes from Luwei Zhou and Nimrod Andy.
5) Use-after-free in virtio_net, from Michael S. Tsirkin.
6) Fix flow mask handling for megaflows in openvswitch, from Pravin B
Shelar.
7) ISDN gigaset and capi bug fixes from Tilman Schmidt.
8) Fix route leak in ip_send_unicast_reply(), from Vasily Averin.
9) Fix two eBPF JIT bugs on x86, from Alexei Starovoitov.
10) TCP_SKB_CB() reorganization caused a few regressions, fixed by Cong
Wang and Eric Dumazet.
11) Don't overwrite the end of the SKB when parsing malformed SCTP ASCONF
chunks, from Daniel Borkmann.
12) Don't call sock_kfree_s() with NULL pointers; besides freeing, this
function also adjusts the socket's memory accounting. From Cong Wang.
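
Several of the use-after-free fixes in (3) share one root cause: a pointer
into skb->data was computed before pskb_may_pull(), which may reallocate the
skb head and leave that pointer dangling. The sketch below is illustrative
only and is not part of this pull; parse_inner_header() is a made-up name,
while pskb_may_pull() and udp_hdr() are the real helpers the fou and
ip_tunnel fixes rely on. The safe ordering is to derive header pointers only
after the pull has succeeded.

#include <linux/skbuff.h>
#include <linux/udp.h>

/* Hypothetical example, not a patch from this series. */
static int parse_inner_header(struct sk_buff *skb, unsigned int len)
{
	struct udphdr *uh;

	/* Make sure 'len' bytes are linear; this may reallocate the head. */
	if (!pskb_may_pull(skb, len))
		return -ENOMEM;

	/* Take pointers into skb->data only after the pull, so a possible
	 * head reallocation cannot leave them dangling.
	 */
	uh = udp_hdr(skb);

	return ntohs(uh->len) >= sizeof(*uh) ? 0 : -EINVAL;
}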
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (90 commits)
bna: fix skb->truesize underestimation
net: dsa: add includes for ethtool and phy_fixed definitions
openvswitch: Set flow-key members.
netrom: use linux/uaccess.h
dsa: Fix conversion from host device to mii bus
tipc: fix bug in bundled buffer reception
ipv6: introduce tcp_v6_iif()
sfc: add support for skb->xmit_more
r8152: return -EBUSY for runtime suspend
ipv4: fix a potential use after free in fou.c
ipv4: fix a potential use after free in ip_tunnel_core.c
hyperv: Add handling of IP header with option field in netvsc_set_hash()
openvswitch: Create right mask with disabled megaflows
vxlan: fix a free after use
openvswitch: fix a use after free
ipv4: dst_entry leak in ip_send_unicast_reply()
ipv4: clean up cookie_v4_check()
ipv4: share tcp_v4_save_options() with cookie_v4_check()
ipv4: call __ip_options_echo() in cookie_v4_check()
atm: simplify lanai.c by using module_pci_driver
...
Diffstat (limited to 'net')
34 files changed, 265 insertions, 202 deletions
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index ba02db022900..5cd44f001f64 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -87,13 +87,12 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
 {
 	struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
 
-	if (!this) {
-		pr_warn("Out of memory\n");
+	if (!this)
 		return NULL;
-	}
+
 	caif_assert(offsetof(struct cfusbl, layer) == 0);
 
-	memset(this, 0, sizeof(struct cflayer));
+	memset(&this->layer, 0, sizeof(this->layer));
 	this->layer.receive = cfusbl_receive;
 	this->layer.transmit = cfusbl_transmit;
 	this->layer.ctrlcmd = cfusbl_ctrlcmd;
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 8c5d6386319f..510aa5a753f0 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -47,10 +47,10 @@ static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
 
 struct cflayer *cfmuxl_create(void)
 {
-	struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
+	struct cfmuxl *this = kzalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
+
 	if (!this)
 		return NULL;
-	memset(this, 0, sizeof(*this));
 	this->layer.receive = cfmuxl_receive;
 	this->layer.transmit = cfmuxl_transmit;
 	this->layer.ctrlcmd = cfmuxl_ctrlcmd;
diff --git a/net/core/dev.c b/net/core/dev.c
index 6470716ddba4..b793e3521a36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2675,7 +2675,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (skb->encapsulation)
 		features &= dev->hw_enc_features;
 
-	if (netif_needs_gso(skb, features)) {
+	if (netif_needs_gso(dev, skb, features)) {
 		struct sk_buff *segs;
 
 		segs = skb_gso_segment(skb, features);
diff --git a/net/core/sock.c b/net/core/sock.c
index b4f3ea2fce60..15e0c67b1069 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1718,6 +1718,8 @@ EXPORT_SYMBOL(sock_kmalloc);
  */
 void sock_kfree_s(struct sock *sk, void *mem, int size)
 {
+	if (WARN_ON_ONCE(!mem))
+		return;
 	kfree(mem);
 	atomic_sub(size, &sk->sk_omem_alloc);
 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ad2acfe1ca61..6bcaa33cd804 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -757,7 +757,8 @@ static int dccp_v6_rcv(struct sk_buff *skb)
 	/* Step 2:
 	 * Look up flow ID in table and get corresponding socket */
 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
-				dh->dccph_sport, dh->dccph_dport);
+				dh->dccph_sport, dh->dccph_dport,
+				inet6_iif(skb));
 	/*
 	 * Step 2:
 	 * If no socket ...
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8030489d9cbe..a851e9f14118 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/etherdevice.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include "dsa_priv.h"
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5b6efb3d2308..f99f41bd15b8 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -537,7 +537,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 			return 1;
 
 		attrlen = rtnh_attrlen(rtnh);
-		if (attrlen < 0) {
+		if (attrlen > 0) {
 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
 
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index efa70ad44906..32e78924e246 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -87,6 +87,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	if (!pskb_may_pull(skb, len))
 		goto drop;
 
+	uh = udp_hdr(skb);
+	guehdr = (struct guehdr *)&uh[1];
+
 	if (guehdr->version != 0)
 		goto drop;
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e35b71289156..88e5ef2c7f51 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1535,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
 	struct sk_buff *nskb;
 	struct sock *sk;
 	struct inet_sock *inet;
+	int err;
 
 	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
 		return;
@@ -1574,8 +1575,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
 	sock_net_set(sk, net);
 	__skb_queue_head_init(&sk->sk_write_queue);
 	sk->sk_sndbuf = sysctl_wmem_default;
-	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
-		       &ipc, &rt, MSG_DONTWAIT);
+	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+			     len, 0, &ipc, &rt, MSG_DONTWAIT);
+	if (unlikely(err)) {
+		ip_flush_pending_frames(sk);
+		goto out;
+	}
+
 	nskb = skb_peek(&sk->sk_write_queue);
 	if (nskb) {
 		if (arg->csumoffset >= 0)
@@ -1587,7 +1593,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
-
+out:
 	put_cpu_var(unicast_sock);
 
 	ip_rt_put(rt);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index f4c987bb7e94..88c386cf7d85 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
 	skb_pull_rcsum(skb, hdr_len);
 
 	if (inner_proto == htons(ETH_P_TEB)) {
-		struct ethhdr *eh = (struct ethhdr *)skb->data;
+		struct ethhdr *eh;
 
 		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 			return -ENOMEM;
 
+		eh = (struct ethhdr *)skb->data;
 		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
 			skb->protocol = eh->h_proto;
 		else
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index af660030e3c7..32b98d0207b4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -255,9 +255,9 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
 }
 EXPORT_SYMBOL(cookie_check_timestamp);
 
-struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
-			     struct ip_options *opt)
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 {
+	struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
 	struct tcp_options_received tcp_opt;
 	struct inet_request_sock *ireq;
 	struct tcp_request_sock *treq;
@@ -317,15 +317,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	/* We throwed the options of the initial SYN away, so we hope
 	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
 	 */
-	if (opt && opt->optlen) {
-		int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;
-
-		ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
-		if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
-			kfree(ireq->opt);
-			ireq->opt = NULL;
-		}
-	}
+	ireq->opt = tcp_v4_save_options(skb);
 
 	if (security_inet_conn_request(sk, skb, req)) {
 		reqsk_free(req);
@@ -344,7 +336,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
-			   (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+			   opt->srr ? opt->faddr : ireq->ir_rmt_addr,
 			   ireq->ir_loc_addr, th->source, th->dest);
 	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(sock_net(sk), &fl4);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 00a41499d52c..a12b455928e5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -68,6 +68,7 @@
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <linux/kernel.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
@@ -3029,6 +3030,21 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 	return packets_acked;
 }
 
+static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
+			   u32 prior_snd_una)
+{
+	const struct skb_shared_info *shinfo;
+
+	/* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
+	if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)))
+		return;
+
+	shinfo = skb_shinfo(skb);
+	if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
+	    between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+		__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
+}
+
 /* Remove acknowledged frames from the retransmission queue. If our packet
  * is before the ack sequence we can discard it as it's confirmed to have
  * arrived at the other end.
@@ -3052,14 +3068,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	first_ackt.v64 = 0;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
-		struct skb_shared_info *shinfo = skb_shinfo(skb);
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 		u8 sacked = scb->sacked;
 		u32 acked_pcount;
 
-		if (unlikely(shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-		    between(shinfo->tskey, prior_snd_una, tp->snd_una - 1))
-			__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
+		tcp_ack_tstamp(sk, skb, prior_snd_una);
 
 		/* Determine how many packets and what bytes were acked, tso and else */
 		if (after(scb->end_seq, tp->snd_una)) {
@@ -3073,10 +3086,12 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 			fully_acked = false;
 		} else {
+			/* Speedup tcp_unlink_write_queue() and next loop */
+			prefetchw(skb->next);
 			acked_pcount = tcp_skb_pcount(skb);
 		}
 
-		if (sacked & TCPCB_RETRANS) {
+		if (unlikely(sacked & TCPCB_RETRANS)) {
 			if (sacked & TCPCB_SACKED_RETRANS)
 				tp->retrans_out -= acked_pcount;
 			flag |= FLAG_RETRANS_DATA_ACKED;
@@ -3107,7 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			 * connection startup slow start one packet too
 			 * quickly. This is severely frowned upon behavior.
 			 */
-			if (!(scb->tcp_flags & TCPHDR_SYN)) {
+			if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
 				flag |= FLAG_DATA_ACKED;
 			} else {
 				flag |= FLAG_SYN_ACKED;
@@ -3119,9 +3134,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 		tcp_unlink_write_queue(skb, sk);
 		sk_wmem_free_skb(sk, skb);
-		if (skb == tp->retransmit_skb_hint)
+		if (unlikely(skb == tp->retransmit_skb_hint))
 			tp->retransmit_skb_hint = NULL;
-		if (skb == tp->lost_skb_hint)
+		if (unlikely(skb == tp->lost_skb_hint))
 			tp->lost_skb_hint = NULL;
 	}
 
@@ -3132,7 +3147,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		flag |= FLAG_SACK_RENEGING;
 
 	skb_mstamp_get(&now);
-	if (first_ackt.v64) {
+	if (likely(first_ackt.v64)) {
 		seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
 		ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
 	}
@@ -3394,6 +3409,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	int acked = 0; /* Number of packets newly acked */
 	long sack_rtt_us = -1L;
 
+	/* We very likely will need to access write queue head. */
+	prefetchw(sk->sk_write_queue.next);
+
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
 	 */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 552e87e3c269..94d1a7757ff7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -880,26 +880,6 @@ bool tcp_syn_flood_action(struct sock *sk,
 }
 EXPORT_SYMBOL(tcp_syn_flood_action);
 
-/*
- * Save and compile IPv4 options into the request_sock if needed.
- */
-static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
-{
-	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
-	struct ip_options_rcu *dopt = NULL;
-
-	if (opt && opt->optlen) {
-		int opt_size = sizeof(*dopt) + opt->optlen;
-
-		dopt = kmalloc(opt_size, GFP_ATOMIC);
-		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
-			kfree(dopt);
-			dopt = NULL;
-		}
-	}
-	return dopt;
-}
-
 #ifdef CONFIG_TCP_MD5SIG
 /*
  * RFC2385 MD5 checksumming requires a mapping of
@@ -1428,7 +1408,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_SYN_COOKIES
 	if (!th->syn)
-		sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt);
+		sk = cookie_v4_check(sk, skb);
 #endif
 	return sk;
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index becd98ce9a1c..3af21296d967 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -839,26 +839,38 @@ void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
+	int wmem;
+
+	/* Keep one reference on sk_wmem_alloc.
+	 * Will be released by sk_free() from here or tcp_tasklet_func()
+	 */
+	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
+
+	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
+	 * Wait until our queues (qdisc + devices) are drained.
+	 * This gives :
+	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
+	 * - chance for incoming ACK (processed by another cpu maybe)
+	 *   to migrate this flow (skb->ooo_okay will be eventually set)
+	 */
+	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
+		goto out;
 
 	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
 	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
 		unsigned long flags;
 		struct tsq_tasklet *tsq;
 
-		/* Keep a ref on socket.
-		 * This last ref will be released in tcp_tasklet_func()
-		 */
-		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
-
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
 		list_add(&tp->tsq_node, &tsq->head);
 		tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
-	} else {
-		sock_wfree(skb);
+		return;
 	}
+out:
+	sk_free(sk);
 }
 
 /* This routine actually transmits TCP packets queued in by
@@ -914,9 +926,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tcp_ca_event(sk, CA_EVENT_TX_START);
 
 	/* if no packet is in qdisc/device queue, then allow XPS to select
-	 * another queue.
+	 * another queue. We can be called from tcp_tsq_handler()
+	 * which holds one reference to sk_wmem_alloc.
+	 *
+	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
+	 * One way to get this would be to set skb->truesize = 2 on them.
 	 */
-	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
+	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
 
 	skb_push(skb, tcp_header_size);
 	skb_reset_transport_header(skb);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f5e319a8d4e2..baf2742d1ec4 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -235,7 +235,6 @@ static struct ifacaddr6 *aca_alloc(struct rt6_info *rt,
 	/* aca_tstamp should be updated upon changes */
 	aca->aca_cstamp = aca->aca_tstamp = jiffies;
 	atomic_set(&aca->aca_refcnt, 1);
-	spin_lock_init(&aca->aca_lock);
 
 	return aca;
 }
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index e25b633266c3..2f25cb6347ca 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -214,7 +214,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		ireq->ir_iif = inet6_iif(skb);
+		ireq->ir_iif = tcp_v6_iif(skb);
 
 	ireq->ir_mark = inet_request_mark(sk, skb);
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cf2e45ab2fa4..831495529b82 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -424,6 +424,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	if (sock_owned_by_user(sk))
 		goto out;
 
+	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
 	req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
 				   &hdr->saddr, inet6_iif(skb));
 	if (!req)
@@ -738,7 +739,7 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-		ireq->ir_iif = inet6_iif(skb);
+		ireq->ir_iif = tcp_v6_iif(skb);
 
 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
@@ -860,7 +861,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 
 	fl6.flowi6_proto = IPPROTO_TCP;
 	if (rt6_need_strict(&fl6.daddr) && !oif)
-		fl6.flowi6_oif = inet6_iif(skb);
+		fl6.flowi6_oif = tcp_v6_iif(skb);
 	else
 		fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
@@ -918,7 +919,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
 					    &tcp_hashinfo, &ipv6h->saddr,
 					    th->source, &ipv6h->daddr,
-					    ntohs(th->source), inet6_iif(skb));
+					    ntohs(th->source), tcp_v6_iif(skb));
 		if (!sk1)
 			return;
 
@@ -1000,13 +1001,14 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 	/* Find possible connection requests. */
 	req = inet6_csk_search_req(sk, &prev, th->source,
 				   &ipv6_hdr(skb)->saddr,
-				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
+				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
 	if (req)
 		return tcp_check_req(sk, skb, req, prev, false);
 
 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
 					 &ipv6_hdr(skb)->saddr, th->source,
-					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
+					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
+					 tcp_v6_iif(skb));
 
 	if (nsk) {
 		if (nsk->sk_state != TCP_TIME_WAIT) {
@@ -1090,7 +1092,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions = NULL;
 		newnp->opt = NULL;
-		newnp->mcast_oif = inet6_iif(skb);
+		newnp->mcast_oif = tcp_v6_iif(skb);
 		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
 		if (np->repflow)
@@ -1174,7 +1176,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		skb_set_owner_r(newnp->pktoptions, newsk);
 	}
 	newnp->opt = NULL;
-	newnp->mcast_oif = inet6_iif(skb);
+	newnp->mcast_oif = tcp_v6_iif(skb);
 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
 	if (np->repflow)
@@ -1360,7 +1362,7 @@ ipv6_pktoptions:
 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
-			np->mcast_oif = inet6_iif(opt_skb);
+			np->mcast_oif = tcp_v6_iif(opt_skb);
 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
@@ -1427,7 +1429,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
 	TCP_SKB_CB(skb)->sacked = 0;
 
-	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
+				tcp_v6_iif(skb));
 	if (!sk)
 		goto no_tcp_socket;
 
@@ -1514,7 +1517,7 @@ do_time_wait:
 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
 					    &ipv6_hdr(skb)->saddr, th->source,
 					    &ipv6_hdr(skb)->daddr,
-					    ntohs(th->dest), inet6_iif(skb));
+					    ntohs(th->dest), tcp_v6_iif(skb));
 		if (sk2 != NULL) {
 			struct inet_timewait_sock *tw = inet_twsk(sk);
 			inet_twsk_deschedule(tw, &tcp_death_row);
@@ -1553,6 +1556,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 	if (th->doff < sizeof(struct tcphdr) / 4)
 		return;
 
+	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
 					&hdr->saddr, th->source,
 					&hdr->daddr, ntohs(th->dest),
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71cf1bffea06..1b06a1fcf3e8 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -30,7 +30,7 @@
 #include <linux/skbuff.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/fcntl.h>
 #include <linux/termios.h>	/* For TIOCINQ/OUTQ */
 #include <linux/mm.h>
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 743262becd6e..6ae063cebf7d 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -20,8 +20,8 @@
 #include <linux/in.h>
 #include <linux/if_ether.h>	/* For the statistics structure. */
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
 #include <asm/io.h>
 
 #include <linux/inet.h>
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index c3073a2ef634..80dbd0beb516 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -23,7 +23,7 @@
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 0b4bcb2bf38f..00fbf1419ec6 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -22,7 +22,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index b976d5eff2de..96b64d2f6dbf 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -25,7 +25,7 @@
 #include <linux/if_arp.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/fcntl.h>
 #include <linux/termios.h>	/* For TIOCINQ/OUTQ */
 #include <linux/mm.h>
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index ca40e2298f5a..029c8bb90f4c 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -22,7 +22,7 @@
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index ff2c1b142f57..94d05806a9a2 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -23,7 +23,7 @@
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/fcntl.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 62db02ba36bc..2b78789ea7c5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -274,6 +274,8 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
 			key->ip.frag = OVS_FRAG_TYPE_LATER;
 		else
 			key->ip.frag = OVS_FRAG_TYPE_FIRST;
+	} else {
+		key->ip.frag = OVS_FRAG_TYPE_NONE;
 	}
 
 	nh_len = payload_ofs - nh_ofs;
@@ -358,6 +360,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
 	 */
 	key->tp.src = htons(icmp->icmp6_type);
 	key->tp.dst = htons(icmp->icmp6_code);
+	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
 
 	if (icmp->icmp6_code == 0 &&
 	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -557,10 +560,11 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 	} else if (key->eth.type == htons(ETH_P_ARP) ||
 		   key->eth.type == htons(ETH_P_RARP)) {
 		struct arp_eth_header *arp;
+		bool arp_available = arphdr_ok(skb);
 
 		arp = (struct arp_eth_header *)skb_network_header(skb);
 
-		if (arphdr_ok(skb) &&
+		if (arp_available &&
 		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
 		    arp->ar_pro == htons(ETH_P_IP) &&
 		    arp->ar_hln == ETH_ALEN &&
@@ -673,9 +677,6 @@ int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
 	key->ovs_flow_hash = 0;
 	key->recirc_id = 0;
 
-	/* Flags are always used as part of stats */
-	key->tp.flags = 0;
-
 	return key_extract(skb, key);
 }
 
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 368f23307911..939bcb32100f 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -103,10 +103,19 @@ static void update_range__(struct sw_flow_match *match,
 	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
 				  value_p, len, is_mask)
 
-static u16 range_n_bytes(const struct sw_flow_key_range *range)
-{
-	return range->end - range->start;
-}
+#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
+	do { \
+		update_range__(match, offsetof(struct sw_flow_key, field), \
+			       sizeof((match)->key->field), is_mask); \
+		if (is_mask) { \
+			if ((match)->mask) \
+				memset((u8 *)&(match)->mask->key.field, value,\
+				       sizeof((match)->mask->key.field)); \
+		} else { \
+			memset((u8 *)&(match)->key->field, value, \
+			       sizeof((match)->key->field)); \
+		} \
+	} while (0)
 
 static bool match_validate(const struct sw_flow_match *match,
 			   u64 key_attrs, u64 mask_attrs)
@@ -809,13 +818,26 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 	return 0;
 }
 
-static void sw_flow_mask_set(struct sw_flow_mask *mask,
-			     struct sw_flow_key_range *range, u8 val)
+static void nlattr_set(struct nlattr *attr, u8 val, bool is_attr_mask_key)
 {
-	u8 *m = (u8 *)&mask->key + range->start;
+	struct nlattr *nla;
+	int rem;
+
+	/* The nlattr stream should already have been validated */
+	nla_for_each_nested(nla, attr, rem) {
+		/* We assume that ovs_key_lens[type] == -1 means that type is a
+		 * nested attribute
+		 */
+		if (is_attr_mask_key && ovs_key_lens[nla_type(nla)] == -1)
+			nlattr_set(nla, val, false);
+		else
+			memset(nla_data(nla), val, nla_len(nla));
+	}
+}
 
-	mask->range = *range;
-	memset(m, val, range_n_bytes(range));
+static void mask_set_nlattr(struct nlattr *attr, u8 val)
+{
+	nlattr_set(attr, val, true);
 }
 
 /**
@@ -836,6 +858,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 {
 	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
 	const struct nlattr *encap;
+	struct nlattr *newmask = NULL;
 	u64 key_attrs = 0;
 	u64 mask_attrs = 0;
 	bool encap_valid = false;
@@ -882,18 +905,44 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 	if (err)
 		return err;
 
+	if (match->mask && !mask) {
+		/* Create an exact match mask. We need to set to 0xff all the
+		 * 'match->mask' fields that have been touched in 'match->key'.
+		 * We cannot simply memset 'match->mask', because padding bytes
+		 * and fields not specified in 'match->key' should be left to 0.
+		 * Instead, we use a stream of netlink attributes, copied from
+		 * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care
+		 * of filling 'match->mask' appropriately.
+		 */
+		newmask = kmemdup(key, nla_total_size(nla_len(key)),
+				  GFP_KERNEL);
+		if (!newmask)
+			return -ENOMEM;
+
+		mask_set_nlattr(newmask, 0xff);
+
+		/* The userspace does not send tunnel attributes that are 0,
+		 * but we should not wildcard them nonetheless.
+		 */
+		if (match->key->tun_key.ipv4_dst)
+			SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true);
+
+		mask = newmask;
+	}
+
 	if (mask) {
 		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
 		if (err)
-			return err;
+			goto free_newmask;
 
 		if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
 			__be16 eth_type = 0;
 			__be16 tci = 0;
 
 			if (!encap_valid) {
 				OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
-				return -EINVAL;
+				err = -EINVAL;
+				goto free_newmask;
 			}
 
 			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
@@ -904,10 +953,13 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
 				encap = a[OVS_KEY_ATTR_ENCAP];
 				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
+				if (err)
+					goto free_newmask;
 			} else {
 				OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
 					  ntohs(eth_type));
-				return -EINVAL;
+				err = -EINVAL;
+				goto free_newmask;
 			}
 
 			if (a[OVS_KEY_ATTR_VLAN])
@@ -915,23 +967,22 @@ int ovs_nla_get_match(struct sw_flow_match *match,
 
 			if (!(tci & htons(VLAN_TAG_PRESENT))) {
 				OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
-				return -EINVAL;
+				err = -EINVAL;
+				goto free_newmask;
 			}
 		}
 
 		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
 		if (err)
-			return err;
-	} else {
-		/* Populate exact match flow's key mask. */
-		if (match->mask)
-			sw_flow_mask_set(match->mask, &match->range, 0xff);
+			goto free_newmask;
 	}
 
 	if (!match_validate(match, key_attrs, mask_attrs))
-		return -EINVAL;
+		err = -EINVAL;
 
-	return 0;
+free_newmask:
+	kfree(newmask);
+	return err;
 }
 
 /**
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 910b3ef2c0d5..106a9d80b663 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -30,7 +30,7 @@
 
 /**
  * struct geneve_port - Keeps track of open UDP ports
- * @sock: The socket created for this port number.
+ * @gs: The socket created for this port number.
  * @name: vport name.
 */
 struct geneve_port {
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 53001b020ca7..6015802ebe6f 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -408,13 +408,13 @@ int ovs_vport_get_upcall_portids(const struct vport *vport,
 *
 * Returns the portid of the target socket. Must be called with rcu_read_lock.
 */
-u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb)
+u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
 {
 	struct vport_portids *ids;
 	u32 ids_index;
 	u32 hash;
 
-	ids = rcu_dereference(p->upcall_portids);
+	ids = rcu_dereference(vport->upcall_portids);
 
 	if (ids->n_ids == 1 && ids->ids[0] == 0)
 		return 0;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 4e37c1cbe8b2..40084d843e9f 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -564,12 +564,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 
 	if (rs->rs_bound_addr == 0) {
 		ret = -ENOTCONN; /* XXX not a great errno */
-		goto out;
+		goto out_ret;
 	}
 
 	if (args->nr_local > UIO_MAXIOV) {
 		ret = -EMSGSIZE;
-		goto out;
+		goto out_ret;
 	}
 
 	/* Check whether to allocate the iovec area */
@@ -578,7 +578,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
 		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
 		if (!iovs) {
 			ret = -ENOMEM;
-			goto out;
+			goto out_ret;
 		}
 	}
 
@@ -696,6 +696,7 @@ out:
 	if (iovs != iovstack)
 		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
 	kfree(pages);
+out_ret:
 	if (ret)
 		rds_rdma_free_op(op);
 	else
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a88b8524846e..f791edd64d6c 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1668,6 +1668,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
 	 * ack chunk whose serial number matches that of the request.
 	 */
 	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+		if (sctp_chunk_pending(ack))
+			continue;
 		if (ack->subh.addip_hdr->serial == serial) {
 			sctp_chunk_hold(ack);
 			return ack;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 4de12afa13d4..7e8a16c77039 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -140,18 +140,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 		} else {
 			/* Nothing to do. Next chunk in the packet, please. */
 			ch = (sctp_chunkhdr_t *) chunk->chunk_end;
-
 			/* Force chunk->skb->data to chunk->chunk_end. */
-			skb_pull(chunk->skb,
-				 chunk->chunk_end - chunk->skb->data);
-
-			/* Verify that we have at least chunk headers
-			 * worth of buffer left.
-			 */
-			if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
-				sctp_chunk_free(chunk);
-				chunk = queue->in_progress = NULL;
-			}
+			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+			/* We are guaranteed to pull a SCTP header. */
 		}
 	}
 
@@ -187,24 +178,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
 	chunk->subh.v = NULL; /* Subheader is no longer valid. */
 
-	if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
+	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+	    skb_tail_pointer(chunk->skb)) {
 		/* This is not a singleton */
 		chunk->singleton = 0;
 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
-		/* RFC 2960, Section 6.10 Bundling
-		 *
-		 * Partial chunks MUST NOT be placed in an SCTP packet.
-		 * If the receiver detects a partial chunk, it MUST drop
-		 * the chunk.
-		 *
-		 * Since the end of the chunk is past the end of our buffer
-		 * (which contains the whole packet, we can freely discard
-		 * the whole packet.
-		 */
-		sctp_chunk_free(chunk);
-		chunk = queue->in_progress = NULL;
-
-		return NULL;
+		/* Discard inside state machine. */
+		chunk->pdiscard = 1;
+		chunk->chunk_end = skb_tail_pointer(chunk->skb);
 	} else {
 		/* We are at the end of the packet, so mark the chunk
 		 * in case we need to send a SACK.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ae0e616a7ca5..ab734be8cb20 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -3110,50 +3110,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, | |||
3110 | return SCTP_ERROR_NO_ERROR; | 3110 | return SCTP_ERROR_NO_ERROR; |
3111 | } | 3111 | } |
3112 | 3112 | ||
3113 | /* Verify the ASCONF packet before we process it. */ | 3113 | /* Verify the ASCONF packet before we process it. */ |
3114 | int sctp_verify_asconf(const struct sctp_association *asoc, | 3114 | bool sctp_verify_asconf(const struct sctp_association *asoc, |
3115 | struct sctp_paramhdr *param_hdr, void *chunk_end, | 3115 | struct sctp_chunk *chunk, bool addr_param_needed, |
3116 | struct sctp_paramhdr **errp) { | 3116 | struct sctp_paramhdr **errp) |
3117 | sctp_addip_param_t *asconf_param; | 3117 | { |
3118 | sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; | ||
3118 | union sctp_params param; | 3119 | union sctp_params param; |
3119 | int length, plen; | 3120 | bool addr_param_seen = false; |
3120 | |||
3121 | param.v = (sctp_paramhdr_t *) param_hdr; | ||
3122 | while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { | ||
3123 | length = ntohs(param.p->length); | ||
3124 | *errp = param.p; | ||
3125 | 3121 | ||
3126 | if (param.v > chunk_end - length || | 3122 | sctp_walk_params(param, addip, addip_hdr.params) { |
3127 | length < sizeof(sctp_paramhdr_t)) | 3123 | size_t length = ntohs(param.p->length); |
3128 | return 0; | ||
3129 | 3124 | ||
3125 | *errp = param.p; | ||
3130 | switch (param.p->type) { | 3126 | switch (param.p->type) { |
3127 | case SCTP_PARAM_ERR_CAUSE: | ||
3128 | break; | ||
3129 | case SCTP_PARAM_IPV4_ADDRESS: | ||
3130 | if (length != sizeof(sctp_ipv4addr_param_t)) | ||
3131 | return false; | ||
3132 | addr_param_seen = true; | ||
3133 | break; | ||
3134 | case SCTP_PARAM_IPV6_ADDRESS: | ||
3135 | if (length != sizeof(sctp_ipv6addr_param_t)) | ||
3136 | return false; | ||
3137 | addr_param_seen = true; | ||
3138 | break; | ||
3131 | case SCTP_PARAM_ADD_IP: | 3139 | case SCTP_PARAM_ADD_IP: |
3132 | case SCTP_PARAM_DEL_IP: | 3140 | case SCTP_PARAM_DEL_IP: |
3133 | case SCTP_PARAM_SET_PRIMARY: | 3141 | case SCTP_PARAM_SET_PRIMARY: |
3134 | asconf_param = (sctp_addip_param_t *)param.v; | 3142 | /* In ASCONF chunks, these need to be first. */ |
3135 | plen = ntohs(asconf_param->param_hdr.length); | 3143 | if (addr_param_needed && !addr_param_seen) |
3136 | if (plen < sizeof(sctp_addip_param_t) + | 3144 | return false; |
3137 | sizeof(sctp_paramhdr_t)) | 3145 | length = ntohs(param.addip->param_hdr.length); |
3138 | return 0; | 3146 | if (length < sizeof(sctp_addip_param_t) + |
3147 | sizeof(sctp_paramhdr_t)) | ||
3148 | return false; | ||
3139 | break; | 3149 | break; |
3140 | case SCTP_PARAM_SUCCESS_REPORT: | 3150 | case SCTP_PARAM_SUCCESS_REPORT: |
3141 | case SCTP_PARAM_ADAPTATION_LAYER_IND: | 3151 | case SCTP_PARAM_ADAPTATION_LAYER_IND: |
3142 | if (length != sizeof(sctp_addip_param_t)) | 3152 | if (length != sizeof(sctp_addip_param_t)) |
3143 | return 0; | 3153 | return false; |
3144 | |||
3145 | break; | 3154 | break; |
3146 | default: | 3155 | default: |
3147 | break; | 3156 | /* This is unknown to us, reject! */ |
3157 | return false; | ||
3148 | } | 3158 | } |
3149 | |||
3150 | param.v += WORD_ROUND(length); | ||
3151 | } | 3159 | } |
3152 | 3160 | ||
3153 | if (param.v != chunk_end) | 3161 | /* Remaining sanity checks. */ |
3154 | return 0; | 3162 | if (addr_param_needed && !addr_param_seen) |
3163 | return false; | ||
3164 | if (!addr_param_needed && addr_param_seen) | ||
3165 | return false; | ||
3166 | if (param.v != chunk->chunk_end) | ||
3167 | return false; | ||
3155 | 3168 | ||
3156 | return 1; | 3169 | return true; |
3157 | } | 3170 | } |
3158 | 3171 | ||
3159 | /* Process an incoming ASCONF chunk with the next expected serial no. and | 3172 | /* Process an incoming ASCONF chunk with the next expected serial no. and |
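The rewritten verifier above moves from ad-hoc pointer arithmetic to a whitelist: every known parameter type gets an explicit length check, anything unknown is rejected outright, and the leading address parameter must be present exactly when the caller asks for it. A condensed user-space sketch of that decision logic, using hypothetical type codes and lengths rather than the kernel's definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical parameter type codes and lengths, for illustration only. */
enum ptype { P_ERR_CAUSE, P_IPV4, P_IPV6, P_ADD_IP, P_DEL_IP,
	     P_SET_PRIMARY, P_SUCCESS, P_ADAPTATION };

struct param { enum ptype type; size_t length; };

#define PARAM_HDR_LEN    4u
#define IPV4_PARAM_LEN   8u   /* header + 4-byte address  */
#define IPV6_PARAM_LEN  20u   /* header + 16-byte address */
#define ADDIP_PARAM_LEN  8u   /* header + correlation id  */

/* Whitelist check for one parameter; tracks the leading address param. */
static bool verify_one(const struct param *p, bool addr_needed,
		       bool *addr_seen)
{
	switch (p->type) {
	case P_ERR_CAUSE:
		return true;
	case P_IPV4:
		*addr_seen = true;
		return p->length == IPV4_PARAM_LEN;
	case P_IPV6:
		*addr_seen = true;
		return p->length == IPV6_PARAM_LEN;
	case P_ADD_IP:
	case P_DEL_IP:
	case P_SET_PRIMARY:
		/* In ASCONF chunks the address parameter must come first. */
		if (addr_needed && !*addr_seen)
			return false;
		return p->length >= ADDIP_PARAM_LEN + PARAM_HDR_LEN;
	case P_SUCCESS:
	case P_ADAPTATION:
		return p->length == ADDIP_PARAM_LEN;
	default:
		return false;   /* unknown to us: reject */
	}
}

int main(void)
{
	bool addr_seen = false;
	struct param addr  = { P_IPV4,   IPV4_PARAM_LEN };
	struct param addip = { P_ADD_IP, ADDIP_PARAM_LEN + PARAM_HDR_LEN };

	printf("%d\n", verify_one(&addr,  true, &addr_seen));  /* 1 */
	printf("%d\n", verify_one(&addip, true, &addr_seen));  /* 1 */
	return 0;
}

The design choice is to fail closed: a parameter the verifier cannot account for is grounds to reject the whole chunk, rather than to keep walking.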
@@ -3162,16 +3175,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc, | |||
3162 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, | 3175 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, |
3163 | struct sctp_chunk *asconf) | 3176 | struct sctp_chunk *asconf) |
3164 | { | 3177 | { |
3178 | sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; | ||
3179 | bool all_param_pass = true; | ||
3180 | union sctp_params param; | ||
3165 | sctp_addiphdr_t *hdr; | 3181 | sctp_addiphdr_t *hdr; |
3166 | union sctp_addr_param *addr_param; | 3182 | union sctp_addr_param *addr_param; |
3167 | sctp_addip_param_t *asconf_param; | 3183 | sctp_addip_param_t *asconf_param; |
3168 | struct sctp_chunk *asconf_ack; | 3184 | struct sctp_chunk *asconf_ack; |
3169 | |||
3170 | __be16 err_code; | 3185 | __be16 err_code; |
3171 | int length = 0; | 3186 | int length = 0; |
3172 | int chunk_len; | 3187 | int chunk_len; |
3173 | __u32 serial; | 3188 | __u32 serial; |
3174 | int all_param_pass = 1; | ||
3175 | 3189 | ||
3176 | chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); | 3190 | chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); |
3177 | hdr = (sctp_addiphdr_t *)asconf->skb->data; | 3191 | hdr = (sctp_addiphdr_t *)asconf->skb->data; |
@@ -3199,9 +3213,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, | |||
3199 | goto done; | 3213 | goto done; |
3200 | 3214 | ||
3201 | /* Process the TLVs contained within the ASCONF chunk. */ | 3215 | /* Process the TLVs contained within the ASCONF chunk. */ |
3202 | while (chunk_len > 0) { | 3216 | sctp_walk_params(param, addip, addip_hdr.params) { |
3217 | /* Skip preceding address parameters. */ ||
3218 | if (param.p->type == SCTP_PARAM_IPV4_ADDRESS || | ||
3219 | param.p->type == SCTP_PARAM_IPV6_ADDRESS) | ||
3220 | continue; | ||
3221 | |||
3203 | err_code = sctp_process_asconf_param(asoc, asconf, | 3222 | err_code = sctp_process_asconf_param(asoc, asconf, |
3204 | asconf_param); | 3223 | param.addip); |
3205 | /* ADDIP 4.1 A7) | 3224 | /* ADDIP 4.1 A7) |
3206 | * If an error response is received for a TLV parameter, | 3225 | * If an error response is received for a TLV parameter, |
3207 | * all TLVs with no response before the failed TLV are | 3226 | * all TLVs with no response before the failed TLV are |
@@ -3209,28 +3228,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, | |||
3209 | * the failed response are considered unsuccessful unless | 3228 | * the failed response are considered unsuccessful unless |
3210 | * a specific success indication is present for the parameter. | 3229 | * a specific success indication is present for the parameter. |
3211 | */ | 3230 | */ |
3212 | if (SCTP_ERROR_NO_ERROR != err_code) | 3231 | if (err_code != SCTP_ERROR_NO_ERROR) |
3213 | all_param_pass = 0; | 3232 | all_param_pass = false; |
3214 | |||
3215 | if (!all_param_pass) | 3233 | if (!all_param_pass) |
3216 | sctp_add_asconf_response(asconf_ack, | 3234 | sctp_add_asconf_response(asconf_ack, param.addip->crr_id, |
3217 | asconf_param->crr_id, err_code, | 3235 | err_code, param.addip); |
3218 | asconf_param); | ||
3219 | 3236 | ||
3220 | /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add | 3237 | /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add |
3221 | * an IP address sends an 'Out of Resource' in its response, it | 3238 | * an IP address sends an 'Out of Resource' in its response, it |
3222 | * MUST also fail any subsequent add or delete requests bundled | 3239 | * MUST also fail any subsequent add or delete requests bundled |
3223 | * in the ASCONF. | 3240 | * in the ASCONF. |
3224 | */ | 3241 | */ |
3225 | if (SCTP_ERROR_RSRC_LOW == err_code) | 3242 | if (err_code == SCTP_ERROR_RSRC_LOW) |
3226 | goto done; | 3243 | goto done; |
3227 | |||
3228 | /* Move to the next ASCONF param. */ | ||
3229 | length = ntohs(asconf_param->param_hdr.length); | ||
3230 | asconf_param = (void *)asconf_param + length; | ||
3231 | chunk_len -= length; | ||
3232 | } | 3244 | } |
3233 | |||
3234 | done: | 3245 | done: |
3235 | asoc->peer.addip_serial++; | 3246 | asoc->peer.addip_serial++; |
3236 | 3247 | ||
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index c8f606324134..3ee27b7704ff 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -170,6 +170,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk, | |||
170 | { | 170 | { |
171 | __u16 chunk_length = ntohs(chunk->chunk_hdr->length); | 171 | __u16 chunk_length = ntohs(chunk->chunk_hdr->length); |
172 | 172 | ||
173 | /* Previously already marked? */ | ||
174 | if (unlikely(chunk->pdiscard)) | ||
175 | return 0; | ||
173 | if (unlikely(chunk_length < required_length)) | 176 | if (unlikely(chunk_length < required_length)) |
174 | return 0; | 177 | return 0; |
175 | 178 | ||
@@ -3591,9 +3594,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, | |||
3591 | struct sctp_chunk *asconf_ack = NULL; | 3594 | struct sctp_chunk *asconf_ack = NULL; |
3592 | struct sctp_paramhdr *err_param = NULL; | 3595 | struct sctp_paramhdr *err_param = NULL; |
3593 | sctp_addiphdr_t *hdr; | 3596 | sctp_addiphdr_t *hdr; |
3594 | union sctp_addr_param *addr_param; | ||
3595 | __u32 serial; | 3597 | __u32 serial; |
3596 | int length; | ||
3597 | 3598 | ||
3598 | if (!sctp_vtag_verify(chunk, asoc)) { | 3599 | if (!sctp_vtag_verify(chunk, asoc)) { |
3599 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | 3600 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, |
@@ -3618,17 +3619,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, | |||
3618 | hdr = (sctp_addiphdr_t *)chunk->skb->data; | 3619 | hdr = (sctp_addiphdr_t *)chunk->skb->data; |
3619 | serial = ntohl(hdr->serial); | 3620 | serial = ntohl(hdr->serial); |
3620 | 3621 | ||
3621 | addr_param = (union sctp_addr_param *)hdr->params; | ||
3622 | length = ntohs(addr_param->p.length); | ||
3623 | if (length < sizeof(sctp_paramhdr_t)) | ||
3624 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, | ||
3625 | (void *)addr_param, commands); | ||
3626 | |||
3627 | /* Verify the ASCONF chunk before processing it. */ | 3622 | /* Verify the ASCONF chunk before processing it. */ |
3628 | if (!sctp_verify_asconf(asoc, | 3623 | if (!sctp_verify_asconf(asoc, chunk, true, &err_param)) |
3629 | (sctp_paramhdr_t *)((void *)addr_param + length), | ||
3630 | (void *)chunk->chunk_end, | ||
3631 | &err_param)) | ||
3632 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, | 3624 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, |
3633 | (void *)err_param, commands); | 3625 | (void *)err_param, commands); |
3634 | 3626 | ||
@@ -3745,10 +3737,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, | |||
3745 | rcvd_serial = ntohl(addip_hdr->serial); | 3737 | rcvd_serial = ntohl(addip_hdr->serial); |
3746 | 3738 | ||
3747 | /* Verify the ASCONF-ACK chunk before processing it. */ | 3739 | /* Verify the ASCONF-ACK chunk before processing it. */ |
3748 | if (!sctp_verify_asconf(asoc, | 3740 | if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param)) |
3749 | (sctp_paramhdr_t *)addip_hdr->params, | ||
3750 | (void *)asconf_ack->chunk_end, | ||
3751 | &err_param)) | ||
3752 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, | 3741 | return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, |
3753 | (void *)err_param, commands); | 3742 | (void *)err_param, commands); |
3754 | 3743 | ||
diff --git a/net/tipc/link.c b/net/tipc/link.c index 65410e18b8a6..1db162aa64a5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1924,7 +1924,12 @@ void tipc_link_bundle_rcv(struct sk_buff *buf) | |||
1924 | } | 1924 | } |
1925 | omsg = buf_msg(obuf); | 1925 | omsg = buf_msg(obuf); |
1926 | pos += align(msg_size(omsg)); | 1926 | pos += align(msg_size(omsg)); |
1927 | if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) { | 1927 | if (msg_isdata(omsg)) { |
1928 | if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG)) | ||
1929 | tipc_sk_mcast_rcv(obuf); | ||
1930 | else | ||
1931 | tipc_sk_rcv(obuf); | ||
1932 | } else if (msg_user(omsg) == CONN_MANAGER) { | ||
1928 | tipc_sk_rcv(obuf); | 1933 | tipc_sk_rcv(obuf); |
1929 | } else if (msg_user(omsg) == NAME_DISTRIBUTOR) { | 1934 | } else if (msg_user(omsg) == NAME_DISTRIBUTOR) { |
1930 | tipc_named_rcv(obuf); | 1935 | tipc_named_rcv(obuf); |
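The tipc change above fixes bundled buffer reception by routing each unbundled data message according to its type: multicast messages go through the multicast receive path, while other data and CONN_MANAGER traffic stay on the ordinary socket receive. A small user-space sketch of that dispatch, shaped like tipc_link_bundle_rcv() but with made-up message kinds:

#include <stdio.h>

/* Made-up message classification mirroring the shape of the fix. */
enum user_kind { DATA, CONN_MANAGER, NAME_DISTRIBUTOR };
enum msg_type  { UNICAST_MSG, MCAST_MSG };

struct msg { enum user_kind user; enum msg_type type; };

static void sk_rcv(const struct msg *m)       { (void)m; puts("socket rcv"); }
static void sk_mcast_rcv(const struct msg *m) { (void)m; puts("mcast rcv");  }
static void named_rcv(const struct msg *m)    { (void)m; puts("named rcv");  }

/* Dispatch one unbundled message by user and type. */
static void dispatch(const struct msg *m)
{
	if (m->user == DATA) {
		/* Multicast data must take the multicast path... */
		if (m->type == MCAST_MSG)
			sk_mcast_rcv(m);
		else
			sk_rcv(m);
	} else if (m->user == CONN_MANAGER) {
		/* ...while connection-manager messages stay on the socket path. */
		sk_rcv(m);
	} else if (m->user == NAME_DISTRIBUTOR) {
		named_rcv(m);
	}
}

int main(void)
{
	struct msg bundle[] = {
		{ DATA, MCAST_MSG },
		{ DATA, UNICAST_MSG },
		{ CONN_MANAGER, UNICAST_MSG },
	};
	for (size_t i = 0; i < sizeof(bundle) / sizeof(bundle[0]); i++)
		dispatch(&bundle[i]);
	return 0;
}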