author		Linus Torvalds <torvalds@linux-foundation.org>	2013-12-02 13:09:07 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-12-02 13:09:07 -0500
commit		5fc92de3c7106d17f85c245383ba072d810d6bb0 (patch)
tree		1e27a49e82ca9736144cc73ad5544a7ddf906644 /net
parent		b0d8d2292160bb63de1972361ebed100c64b5b37 (diff)
parent		833846e8fa0c51fb3e47bca8adfdd7b10643b737 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:
 "Here is a pile of bug fixes that accumulated while I was in Europe"

 1) In fixing kernel leaks to userspace during copying of socket
    addresses, we broke a case that used to work, namely the user
    providing a buffer larger than the in-kernel generic socket address
    structure.  This broke Ruby amongst other things.  Fix from Dan
    Carpenter.

 2) Fix regression added by byte queue limit support in 8139cp driver,
    from Yang Yingliang.

 3) The addition of MSG_SENDPAGE_NOTLAST buggered up a few sendpage
    implementations, they should just treat it the same as MSG_MORE.
    Fix from Richard Weinberger and Shawn Landden.

 4) Handle icmpv4 errors received on ipv6 SIT tunnels correctly, from
    Oussama Ghorbel.  In particular we should send an ICMPv6 unreachable
    in such situations.

 5) Fix some regressions in the recent genetlink fixes, in particular
    get the pmcraid driver to use the new safer interfaces correctly.
    From Johannes Berg.

 6) macvtap was converted to use a per-cpu set of statistics, but some
    code was still bumping tx_dropped elsewhere.  From Jason Wang.

 7) Fix build failure of xen-netback due to missing include on some
    architectures, from Andy Whitecroft.

 8) macvtap double counts received packets in statistics, fix from Vlad
    Yasevich.

 9) Fix various cases of using *_STATS_BH() when *_STATS() is more
    appropriate.  From Eric Dumazet and Hannes Frederic Sowa.

10) Pktgen ipsec mode doesn't update the ipv4 header length and checksum
    properly after encapsulation.  Fix from Fan Du.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
  net/mlx4_en: Remove selftest TX queues empty condition
  {pktgen, xfrm} Update IPv4 header total len and checksum after tranformation
  virtio_net: make all RX paths handle erors consistently
  virtio_net: fix error handling for mergeable buffers
  virtio_net: Fixed a trivial typo (fitler --> filter)
  netem: fix gemodel loss generator
  netem: fix loss 4 state model
  netem: missing break in ge loss generator
  net/hsr: Support iproute print_opt ('ip -details ...')
  net/hsr: Very small fix of comment style.
  MAINTAINERS: Added net/hsr/ maintainer
  ipv6: fix possible seqlock deadlock in ip6_finish_output2
  ixgbe: Make ixgbe_identify_qsfp_module_generic static
  ixgbe: turn NETIF_F_HW_L2FW_DOFFLOAD off by default
  ixgbe: ixgbe_fwd_ring_down needs to be static
  e1000: fix possible reset_task running after adapter down
  e1000: fix lockdep warning in e1000_reset_task
  e1000: prevent oops when adapter is being closed and reset simultaneously
  igb: Fixed Wake On LAN support
  inet: fix possible seqlock deadlocks
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/compat.c	2
-rw-r--r--	net/core/pktgen.c	7
-rw-r--r--	net/hsr/hsr_framereg.c	3
-rw-r--r--	net/hsr/hsr_netlink.c	28
-rw-r--r--	net/ipv4/ip_sockglue.c	3
-rw-r--r--	net/ipv4/ping.c	7
-rw-r--r--	net/ipv4/protocol.c	8
-rw-r--r--	net/ipv4/raw.c	2
-rw-r--r--	net/ipv4/tcp_ipv4.c	2
-rw-r--r--	net/ipv4/tcp_memcontrol.c	2
-rw-r--r--	net/ipv4/tcp_offload.c	31
-rw-r--r--	net/ipv4/udp.c	7
-rw-r--r--	net/ipv6/datagram.c	8
-rw-r--r--	net/ipv6/ip6_output.c	4
-rw-r--r--	net/ipv6/ping.c	3
-rw-r--r--	net/ipv6/protocol.c	4
-rw-r--r--	net/ipv6/raw.c	4
-rw-r--r--	net/ipv6/sit.c	50
-rw-r--r--	net/ipv6/tcpv6_offload.c	32
-rw-r--r--	net/ipv6/udp.c	4
-rw-r--r--	net/l2tp/l2tp_ip6.c	2
-rw-r--r--	net/netlink/genetlink.c	13
-rw-r--r--	net/packet/af_packet.c	4
-rw-r--r--	net/sched/sch_netem.c	7
-rw-r--r--	net/sched/sch_tbf.c	32
-rw-r--r--	net/sctp/output.c	3
-rw-r--r--	net/sctp/outqueue.c	6
-rw-r--r--	net/socket.c	2
28 files changed, 186 insertions, 94 deletions
diff --git a/net/compat.c b/net/compat.c
index 618c6a8a911b..dd32e34c1e2c 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
 	    __get_user(kmsg->msg_flags, &umsg->msg_flags))
 		return -EFAULT;
 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
-		return -EINVAL;
+		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
 	kmsg->msg_name = compat_ptr(tmp1);
 	kmsg->msg_iov = compat_ptr(tmp2);
 	kmsg->msg_control = compat_ptr(tmp3);
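The clamp above (and its native-path twin in net/socket.c at the end of this series) restores the case described in item 1 of the pull message: callers may hand in a msg_name buffer larger than the kernel's generic sockaddr_storage. A minimal userspace sketch of that call pattern, purely illustrative (the port and buffer size are made up):

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Send one datagram with a deliberately oversized msg_namelen. With the
 * earlier hardening this failed with EINVAL; the hunk above clamps
 * msg_namelen to sizeof(struct sockaddr_storage) instead. */
static ssize_t send_with_big_name(int fd, const void *buf, size_t len)
{
	union {
		struct sockaddr_in sin;
		char raw[256];		/* larger than sockaddr_storage (128) */
	} name = { .sin = {
		.sin_family = AF_INET,
		.sin_port = htons(5000),
		.sin_addr = { htonl(INADDR_LOOPBACK) },
	} };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name = &name,
		.msg_namelen = sizeof(name),	/* bigger than needed */
		.msg_iov = &iov,
		.msg_iovlen = 1,
	};

	return sendmsg(fd, &msg, 0);
}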
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 261357a66300..a797fff7f222 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
 		if (x) {
 			int ret;
 			__u8 *eth;
+			struct iphdr *iph;
+
 			nhead = x->props.header_len - skb_headroom(skb);
 			if (nhead > 0) {
 				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
 			eth = (__u8 *) skb_push(skb, ETH_HLEN);
 			memcpy(eth, pkt_dev->hh, 12);
 			*(u16 *) &eth[12] = protocol;
+
+			/* Update IPv4 header len as well as checksum value */
+			iph = ip_hdr(skb);
+			iph->tot_len = htons(skb->len - ETH_HLEN);
+			ip_send_check(iph);
 		}
 	}
 	return 1;
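The two lines added above resize the IPv4 header's tot_len to cover the encapsulation and have ip_send_check() refold the header checksum. For reference, a self-contained userspace sketch of that RFC 1071 one's-complement arithmetic (illustration only; the kernel uses its own optimized helpers):

#include <stddef.h>
#include <stdint.h>

/* One's-complement sum over the IPv4 header, with the checksum field
 * assumed to be zeroed first and hdr 16-bit aligned. hdr_len is the
 * header length in bytes (a multiple of 4 for IPv4, so there is no odd
 * trailing byte to handle). */
static uint16_t ipv4_header_checksum(const void *hdr, size_t hdr_len)
{
	const uint16_t *words = hdr;
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < hdr_len / 2; i++)
		sum += words[i];

	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}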
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 003f5bb3acd2..4bdab1521878 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -288,7 +288,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
 static bool seq_nr_after(u16 a, u16 b)
 {
 	/* Remove inconsistency where
-	 * seq_nr_after(a, b) == seq_nr_before(a, b) */
+	 * seq_nr_after(a, b) == seq_nr_before(a, b)
+	 */
 	if ((int) b - a == 32768)
 		return false;
 
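seq_nr_after() compares 16-bit HSR supervision sequence numbers across wraparound; the reformatted comment concerns the ambiguous half-range case that the explicit 32768 check pins down. A userspace sketch of that property, using an illustrative formulation rather than the kernel's own:

#include <stdbool.h>
#include <stdint.h>

/* "a is after b" in a 16-bit circular sequence space. A distance of
 * exactly 32768 is ambiguous, so it is pinned to false; that way
 * seq16_after(a, b) and seq16_after(b, a) can never both be true. */
static bool seq16_after(uint16_t a, uint16_t b)
{
	uint16_t dist = (uint16_t)(a - b);

	if (dist == 32768)
		return false;
	return dist != 0 && dist < 32768;
}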
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 5325af85eea6..01a5261ac7a5 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
 	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
 	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
 	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
+	[IFLA_HSR_SUPERVISION_ADDR]	= { .type = NLA_BINARY, .len = ETH_ALEN },
+	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
 };
 
 
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
 	return hsr_dev_finalize(dev, link, multicast_spec);
 }
 
+static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct hsr_priv *hsr_priv;
+
+	hsr_priv = netdev_priv(dev);
+
+	if (hsr_priv->slave[0])
+		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
+			goto nla_put_failure;
+
+	if (hsr_priv->slave[1])
+		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
+			goto nla_put_failure;
+
+	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
+		    hsr_priv->sup_multicast_addr) ||
+	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
 static struct rtnl_link_ops hsr_link_ops __read_mostly = {
 	.kind			= "hsr",
 	.maxtype		= IFLA_HSR_MAX,
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
 	.priv_size		= sizeof(struct hsr_priv),
 	.setup			= hsr_dev_setup,
 	.newlink		= hsr_newlink,
+	.fill_info		= hsr_fill_info,
 };
 
 
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3f858266fa7e..ddf32a6bc415 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
 /*
  *	Handle MSG_ERRQUEUE
  */
-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
 	struct sock_exterr_skb *serr;
 	struct sk_buff *skb, *skb2;
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
 						   serr->addr_offset);
 		sin->sin_port = serr->port;
 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+		*addr_len = sizeof(*sin);
 	}
 
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
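The new addr_len out-parameter lets the MSG_ERRQUEUE path report how much of msg_name it actually filled in; every caller touched later in this diff (ping, raw, udp, and the ipv6 equivalents) simply passes its own addr_len down. From userspace the visible effect is msg_namelen after an error-queue read; a hedged sketch, assuming IP_RECVERR has been enabled on the socket:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Drain one message from the socket's error queue and report the
 * offender address length the kernel filled in. */
static void read_one_error(int fd)
{
	struct sockaddr_storage addr;
	char data[512], cbuf[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name	= &addr,
		.msg_namelen	= sizeof(addr),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	/* With this fix msg_namelen reflects the sockaddr actually written
	 * (sizeof(struct sockaddr_in) here) instead of being left unset. */
	printf("error source address length: %u\n", (unsigned)msg.msg_namelen);
}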
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 876c6ca2d8f9..242e7f4ed6f4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -772,7 +772,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		err = PTR_ERR(rt);
 		rt = NULL;
 		if (err == -ENETUNREACH)
-			IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 		goto out;
 	}
 
@@ -841,10 +841,11 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	if (flags & MSG_ERRQUEUE) {
 		if (family == AF_INET) {
-			return ip_recv_error(sk, msg, len);
+			return ip_recv_error(sk, msg, len, addr_len);
 #if IS_ENABLED(CONFIG_IPV6)
 		} else if (family == AF_INET6) {
-			return pingv6_ops.ipv6_recv_error(sk, msg, len);
+			return pingv6_ops.ipv6_recv_error(sk, msg, len,
+							  addr_len);
 #endif
 		}
 	}
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index ce848461acbb..46d6a1c923a8 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -31,10 +31,6 @@
 const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
 const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
 
-/*
- *	Add a protocol handler to the hash tables
- */
-
 int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
 	if (!prot->netns_ok) {
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol)
 }
 EXPORT_SYMBOL(inet_add_offload);
 
-/*
- *	Remove a protocol from the hash tables.
- */
-
 int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 {
 	int ret;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5cb8ddb505ee..23c3e5b5bb53 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -697,7 +697,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		goto out;
 
 	if (flags & MSG_ERRQUEUE) {
-		err = ip_recv_error(sk, msg, len);
+		err = ip_recv_error(sk, msg, len, addr_len);
 		goto out;
 	}
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 59a6f8b90cd9..067213924751 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (IS_ERR(rt)) {
 		err = PTR_ERR(rt);
 		if (err == -ENETUNREACH)
-			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 		return err;
 	}
 
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 03e9154f7e68..269a89ecd2f4 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -60,7 +60,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup);
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
 	struct cg_proto *cg_proto;
-	u64 old_lim;
 	int i;
 	int ret;
 
@@ -71,7 +70,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 	if (val > RES_COUNTER_MAX)
 		val = RES_COUNTER_MAX;
 
-	old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
 	ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
 	if (ret)
 		return ret;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a2b68a108eae..05606353c7e7 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -274,33 +274,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *
 {
 	const struct iphdr *iph = skb_gro_network_header(skb);
 	__wsum wsum;
-	__sum16 sum;
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (NAPI_GRO_CB(skb)->flush)
+		goto skip_csum;
+
+	wsum = skb->csum;
 
 	switch (skb->ip_summed) {
+	case CHECKSUM_NONE:
+		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+				    0);
+
+		/* fall through */
+
 	case CHECKSUM_COMPLETE:
 		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
-				  skb->csum)) {
+				  wsum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
-flush:
+
 		NAPI_GRO_CB(skb)->flush = 1;
 		return NULL;
-
-	case CHECKSUM_NONE:
-		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-					  skb_gro_len(skb), IPPROTO_TCP, 0);
-		sum = csum_fold(skb_checksum(skb,
-					     skb_gro_offset(skb),
-					     skb_gro_len(skb),
-					     wsum));
-		if (sum)
-			goto flush;
-
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
 	}
 
+skip_csum:
 	return tcp_gro_receive(head, skb);
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5944d7d668dd..44f6a20fa29d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -999,7 +999,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		err = PTR_ERR(rt);
 		rt = NULL;
 		if (err == -ENETUNREACH)
-			IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 		goto out;
 	}
 
@@ -1098,6 +1098,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
 	struct udp_sock *up = udp_sk(sk);
 	int ret;
 
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		flags |= MSG_MORE;
+
 	if (!up->pending) {
 		struct msghdr msg = { .msg_flags = flags|MSG_MORE };
 
@@ -1236,7 +1239,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
-		return ip_recv_error(sk, msg, len);
+		return ip_recv_error(sk, msg, len, addr_len);
 
 try_again:
 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a454b0ff57c7..8dfe1f4d3c1a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
 /*
  *	Handle MSG_ERRQUEUE
  */
-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sock_exterr_skb *serr;
@@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 					       &sin->sin6_addr);
 			sin->sin6_scope_id = 0;
 		}
+		*addr_len = sizeof(*sin);
 	}
 
 	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
@@ -377,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
 		sin->sin6_family = AF_INET6;
 		sin->sin6_flowinfo = 0;
+		sin->sin6_port = 0;
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
@@ -423,7 +425,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
 /*
  *	Handle IPV6_RECVPATHMTU
  */
-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
+		     int *addr_len)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sk_buff *skb;
@@ -457,6 +460,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
 		sin->sin6_port = 0;
 		sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
 		sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
+		*addr_len = sizeof(*sin);
 	}
 
 	put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 59df872e2f4d..4acdb63495db 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
 	}
 	rcu_read_unlock_bh();
 
-	IP6_INC_STATS_BH(dev_net(dst->dev),
-			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+	IP6_INC_STATS(dev_net(dst->dev),
+		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	return -EINVAL;
 }
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 8815e31a87fe..a83243c3d656 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = {
 
 
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
-static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
+				 int *addr_len)
 {
 	return -EAFNOSUPPORT;
 }
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 22d1bd4670da..e048cf1bb6a2 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol
 }
 EXPORT_SYMBOL(inet6_add_protocol);
 
-/*
- *	Remove a protocol from the hash tables.
- */
-
 int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
 {
 	int ret;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e24ff1df0401..7fb4e14c467f 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -466,10 +466,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		return -EOPNOTSUPP;
 
 	if (flags & MSG_ERRQUEUE)
-		return ipv6_recv_error(sk, msg, len);
+		return ipv6_recv_error(sk, msg, len, addr_len);
 
 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
-		return ipv6_recv_rxpmtu(sk, msg, len);
+		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1b4a4a953675..366fbba3359a 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
 	dev_put(dev);
 }
 
+/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
+ * if sufficient data bytes are available
+ */
+static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
+{
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	struct rt6_info *rt;
+	struct sk_buff *skb2;
+
+	if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
+		return 1;
+
+	skb2 = skb_clone(skb, GFP_ATOMIC);
+
+	if (!skb2)
+		return 1;
+
+	skb_dst_drop(skb2);
+	skb_pull(skb2, iph->ihl * 4);
+	skb_reset_network_header(skb2);
+
+	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
+
+	if (rt && rt->dst.dev)
+		skb2->dev = rt->dst.dev;
+
+	icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+
+	if (rt)
+		ip6_rt_put(rt);
+
+	kfree_skb(skb2);
+
+	return 0;
+}
 
 static int ipip6_err(struct sk_buff *skb, u32 info)
 {
-
-/* All the routers (except for Linux) return only
-   8 bytes of packet payload. It means, that precise relaying of
-   ICMP in the real Internet is absolutely infeasible.
- */
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
@@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 	case ICMP_DEST_UNREACH:
 		switch (code) {
 		case ICMP_SR_FAILED:
-		case ICMP_PORT_UNREACH:
 			/* Impossible event. */
 			return 0;
 		default:
@@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 		goto out;
 
 	err = 0;
+	if (!ipip6_err_gen_icmpv6_unreach(skb))
+		goto out;
+
 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
 		goto out;
 
@@ -919,7 +951,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		if (!new_skb) {
 			ip_rt_put(rt);
 			dev->stats.tx_dropped++;
-			dev_kfree_skb(skb);
+			kfree_skb(skb);
 			return NETDEV_TX_OK;
 		}
 		if (skb->sk)
@@ -945,7 +977,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
-	dev_kfree_skb(skb);
+	kfree_skb(skb);
 out:
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
@@ -985,7 +1017,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
 
 tx_err:
 	dev->stats.tx_errors++;
-	dev_kfree_skb(skb);
+	kfree_skb(skb);
 	return NETDEV_TX_OK;
 
 }
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c1097c798900..6d18157dc32c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 {
 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 	__wsum wsum;
-	__sum16 sum;
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (NAPI_GRO_CB(skb)->flush)
+		goto skip_csum;
+
+	wsum = skb->csum;
 
 	switch (skb->ip_summed) {
+	case CHECKSUM_NONE:
+		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
+				    wsum);
+
+		/* fall through */
+
 	case CHECKSUM_COMPLETE:
 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
-				  skb->csum)) {
+				  wsum)) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
-flush:
+
 		NAPI_GRO_CB(skb)->flush = 1;
 		return NULL;
-
-	case CHECKSUM_NONE:
-		wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
-						    skb_gro_len(skb),
-						    IPPROTO_TCP, 0));
-		sum = csum_fold(skb_checksum(skb,
-					     skb_gro_offset(skb),
-					     skb_gro_len(skb),
-					     wsum));
-		if (sum)
-			goto flush;
-
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
 	}
 
+skip_csum:
 	return tcp_gro_receive(head, skb);
 }
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 81eb8cf8389b..bcd5699313c3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -393,10 +393,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	bool slow;
 
 	if (flags & MSG_ERRQUEUE)
-		return ipv6_recv_error(sk, msg, len);
+		return ipv6_recv_error(sk, msg, len, addr_len);
 
 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
-		return ipv6_recv_rxpmtu(sk, msg, len);
+		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
 
 try_again:
 	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index cfd65304be60..d9b437e55007 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
 		*addr_len = sizeof(*lsa);
 
 	if (flags & MSG_ERRQUEUE)
-		return ipv6_recv_error(sk, msg, len);
+		return ipv6_recv_error(sk, msg, len, addr_len);
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
 	if (!skb)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 4518a57aa5fe..713671ae45af 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -74,9 +74,12 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE];
  * Bit 17 is marked as already used since the VFS quota code
  * also abused this API and relied on family == group ID, we
  * cater to that by giving it a static family and group ID.
+ * Bit 18 is marked as already used since the PMCRAID driver
+ * did the same thing as the VFS quota code (maybe copied?)
  */
 static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) |
-				      BIT(GENL_ID_VFS_DQUOT);
+				      BIT(GENL_ID_VFS_DQUOT) |
+				      BIT(GENL_ID_PMCRAID);
 static unsigned long *mc_groups = &mc_group_start;
 static unsigned long mc_groups_longs = 1;
 
@@ -139,6 +142,7 @@ static u16 genl_generate_id(void)
 
 	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
 		if (id_gen_idx != GENL_ID_VFS_DQUOT &&
+		    id_gen_idx != GENL_ID_PMCRAID &&
 		    !genl_family_find_byid(id_gen_idx))
 			return id_gen_idx;
 		if (++id_gen_idx > GENL_MAX_ID)
@@ -214,7 +218,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
 {
 	int first_id;
 	int n_groups = family->n_mcgrps;
-	int err, i;
+	int err = 0, i;
 	bool groups_allocated = false;
 
 	if (!n_groups)
@@ -236,9 +240,12 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
 	} else if (strcmp(family->name, "NET_DM") == 0) {
 		first_id = 1;
 		BUG_ON(n_groups != 1);
-	} else if (strcmp(family->name, "VFS_DQUOT") == 0) {
+	} else if (family->id == GENL_ID_VFS_DQUOT) {
 		first_id = GENL_ID_VFS_DQUOT;
 		BUG_ON(n_groups != 1);
+	} else if (family->id == GENL_ID_PMCRAID) {
+		first_id = GENL_ID_PMCRAID;
+		BUG_ON(n_groups != 1);
 	} else {
 		groups_allocated = true;
 		err = genl_allocate_reserve_groups(n_groups, &first_id);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ac27c86ef6d1..ba2548bd85bf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -439,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 
 	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
 
-	spin_lock(&rb_queue->lock);
+	spin_lock_bh(&rb_queue->lock);
 	pkc->delete_blk_timer = 1;
-	spin_unlock(&rb_queue->lock);
+	spin_unlock_bh(&rb_queue->lock);
 
 	prb_del_retire_blk_timer(pkc);
 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 75c94e59a3bd..bccd52b36e97 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q)
 		if (rnd < clg->a4) {
 			clg->state = 4;
 			return true;
-		} else if (clg->a4 < rnd && rnd < clg->a1) {
+		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
 			clg->state = 3;
 			return true;
-		} else if (clg->a1 < rnd)
+		} else if (clg->a1 + clg->a4 < rnd)
 			clg->state = 1;
 
 		break;
@@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
 			clg->state = 2;
 		if (net_random() < clg->a4)
 			return true;
+		break;
 	case 2:
 		if (net_random() < clg->a2)
 			clg->state = 1;
-		if (clg->a3 > net_random())
+		if (net_random() > clg->a3)
 			return true;
 	}
 
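In the first hunk the state-1 bands of the 4-state loss model become cumulative: an isolated loss below a4, a lost burst between a4 and a4 + a1, otherwise stay in the good state. A userspace sketch of that transition step (boundary handling simplified, names illustrative, and a1 + a4 assumed not to overflow):

#include <stdbool.h>
#include <stdint.h>

/* State-1 step of a 4-state loss model: rnd is a uniform draw in
 * [0, UINT32_MAX]; a1 and a4 are transition probabilities scaled to the
 * same range, compared against cumulative thresholds as in the fix. */
static bool state1_step(uint32_t rnd, uint32_t a1, uint32_t a4, int *state)
{
	if (rnd < a4) {
		*state = 4;
		return true;		/* isolated loss */
	}
	if (rnd < a4 + a1) {
		*state = 3;
		return true;		/* start of a lost burst */
	}
	*state = 1;
	return false;			/* deliver the packet */
}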
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 68f98595819c..a6090051c5db 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -21,6 +21,7 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
+#include <net/tcp.h>
 
 
 /*	Simple Token Bucket Filter.
@@ -117,6 +118,22 @@ struct tbf_sched_data {
 };
 
 
+/*
+ * Return length of individual segments of a gso packet,
+ * including all headers (MAC, IP, TCP/UDP)
+ */
+static unsigned int skb_gso_seglen(const struct sk_buff *skb)
+{
+	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		hdr_len += tcp_hdrlen(skb);
+	else
+		hdr_len += sizeof(struct udphdr);
+	return hdr_len + shinfo->gso_size;
+}
+
 /* GSO packet is too big, segment it so that tbf can transmit
  * each segment in time
  */
@@ -136,12 +153,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 	while (segs) {
 		nskb = segs->next;
 		segs->next = NULL;
-		if (likely(segs->len <= q->max_size)) {
-			qdisc_skb_cb(segs)->pkt_len = segs->len;
-			ret = qdisc_enqueue(segs, q->qdisc);
-		} else {
-			ret = qdisc_reshape_fail(skb, sch);
-		}
+		qdisc_skb_cb(segs)->pkt_len = segs->len;
+		ret = qdisc_enqueue(segs, q->qdisc);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				sch->qstats.drops++;
@@ -163,7 +176,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	int ret;
 
 	if (qdisc_pkt_len(skb) > q->max_size) {
-		if (skb_is_gso(skb))
+		if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
 			return tbf_segment(skb, sch);
 		return qdisc_reshape_fail(skb, sch);
 	}
@@ -319,6 +332,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	if (max_size < 0)
 		goto done;
 
+	if (max_size < psched_mtu(qdisc_dev(sch)))
+		pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
+				    max_size, qdisc_dev(sch)->name,
+				    psched_mtu(qdisc_dev(sch)));
+
 	if (q->qdisc != &noop_qdisc) {
 		err = fifo_set_limit(q->qdisc, qopt->limit);
 		if (err)
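skb_gso_seglen() sizes one post-segmentation frame as link + network + transport headers plus gso_size, and tbf_enqueue() now only bothers segmenting when that fits within max_size; hence also the new warning when the configured burst is below the device MTU. A back-of-the-envelope check with hypothetical numbers for an IPv4/TCP GSO packet:

#include <stdio.h>

int main(void)
{
	unsigned int eth_hdr = 14;	/* MAC header */
	unsigned int ip_hdr = 20;	/* IPv4, no options */
	unsigned int tcp_hdr = 32;	/* TCP with timestamps */
	unsigned int gso_size = 1448;	/* per-segment payload (MSS) */

	unsigned int seg_len = eth_hdr + ip_hdr + tcp_hdr + gso_size;

	/* 1514 bytes here: segmentation only helps when max_size (the
	 * burst) is at least this large. */
	printf("per-segment length: %u bytes\n", seg_len);
	return 0;
}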
diff --git a/net/sctp/output.c b/net/sctp/output.c
index e650978daf27..0e2644d0a773 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -474,10 +474,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 		 * for a given destination transport address.
 		 */
 
-		if (!tp->rto_pending) {
+		if (!chunk->resent && !tp->rto_pending) {
 			chunk->rtt_in_progress = 1;
 			tp->rto_pending = 1;
 		}
+
 		has_data = 1;
 	}
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 94df75877869..f51ba985a36e 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -446,6 +446,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 			transport->rto_pending = 0;
 		}
 
+		chunk->resent = 1;
+
 		/* Move the chunk to the retransmit queue. The chunks
 		 * on the retransmit queue are always kept in order.
 		 */
@@ -1375,6 +1377,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				 * instance).
 				 */
 				if (!tchunk->tsn_gap_acked &&
+				    !tchunk->resent &&
 				    tchunk->rtt_in_progress) {
 					tchunk->rtt_in_progress = 0;
 					rtt = jiffies - tchunk->sent_at;
@@ -1391,7 +1394,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				 */
 				if (!tchunk->tsn_gap_acked) {
 					tchunk->tsn_gap_acked = 1;
-					*highest_new_tsn_in_sack = tsn;
+					if (TSN_lt(*highest_new_tsn_in_sack, tsn))
+						*highest_new_tsn_in_sack = tsn;
 					bytes_acked += sctp_data_size(tchunk);
 					if (!tchunk->transport)
 						migrate_bytes += sctp_data_size(tchunk);
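TSN_lt() above is a wraparound-safe serial-number comparison over 32-bit transmission sequence numbers, so the highest-new-TSN tracker can only move forward even when gap acks arrive out of order. A userspace sketch of an equivalent comparison (assumed equivalent, not the kernel macro itself):

#include <stdbool.h>
#include <stdint.h>

/* "a < b" for 32-bit serial numbers: true when b is ahead of a by less
 * than half the sequence space (two's-complement cast assumed). */
static bool tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Only advance the running maximum, mirroring the hunk above. */
static void note_new_tsn(uint32_t *highest, uint32_t tsn)
{
	if (tsn_lt(*highest, tsn))
		*highest = tsn;
}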
diff --git a/net/socket.c b/net/socket.c
index 0b18693f2be6..e83c416708af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1973,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
 	if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
 		return -EFAULT;
 	if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
-		return -EINVAL;
+		kmsg->msg_namelen = sizeof(struct sockaddr_storage);
 	return 0;
 }
 