Diffstat (limited to 'net')
 net/ax25/ax25_subr.c                    |  11
 net/bluetooth/rfcomm/tty.c              |  13
 net/core/neighbour.c                    |   9
 net/core/rtnetlink.c                    |   3
 net/core/skbuff.c                       |   5
 net/core/user_dma.c                     |   2
 net/ipv4/devinet.c                      |   9
 net/ipv4/fib_frontend.c                 |   1
 net/ipv4/raw.c                          |   9
 net/ipv4/route.c                        |   2
 net/ipv4/tcp.c                          |   9
 net/ipv4/tcp_input.c                    |  35
 net/ipv4/tcp_output.c                   |   2
 net/ipv4/tunnel4.c                      |   2
 net/ipv4/udp.c                          |   3
 net/ipv6/addrconf.c                     | 107
 net/ipv6/datagram.c                     |  45
 net/ipv6/ip6_flowlabel.c                |   2
 net/ipv6/ipv6_sockglue.c                |  21
 net/ipv6/netfilter/nf_conntrack_reasm.c |   8
 net/ipv6/raw.c                          |  11
 net/ipv6/route.c                        |  12
 net/ipv6/tunnel6.c                      |   2
 net/ipv6/udp.c                          |   8
 net/irda/af_irda.c                      |  12
 net/netfilter/xt_connlimit.c            |   3
 net/netlink/attr.c                      |  12
 net/netlink/genetlink.c                 |   6
 net/sched/sch_dsmark.c                  |   6
 net/sched/sch_gred.c                    |   3
 net/sched/sch_hfsc.c                    |   2
 net/sched/sch_red.c                     |   3
 net/sctp/associola.c                    |  21
 net/sctp/ipv6.c                         |  11
 net/sctp/output.c                       |   2
 net/sctp/outqueue.c                     | 120
 net/sctp/protocol.c                     |  11
 net/sctp/transport.c                    |  50
 net/wireless/nl80211.c                  |  12
 net/xfrm/xfrm_algo.c                    |   4
 40 files changed, 376 insertions(+), 233 deletions(-)
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index d8f215733175..034aa10a5198 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -64,20 +64,15 @@ void ax25_frames_acked(ax25_cb *ax25, unsigned short nr)
 
 void ax25_requeue_frames(ax25_cb *ax25)
 {
-	struct sk_buff *skb, *skb_prev = NULL;
+	struct sk_buff *skb;
 
 	/*
 	 * Requeue all the un-ack-ed frames on the output queue to be picked
 	 * up by ax25_kick called from the timer. This arrangement handles the
 	 * possibility of an empty output queue.
 	 */
-	while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) {
-		if (skb_prev == NULL)
-			skb_queue_head(&ax25->write_queue, skb);
-		else
-			skb_append(skb_prev, skb, &ax25->write_queue);
-		skb_prev = skb;
-	}
+	while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL)
+		skb_queue_head(&ax25->write_queue, skb);
 }
 
 /*
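
The rewrite works because skb_dequeue_tail() pops the newest un-acked frame first and skb_queue_head() pushes it onto the front of the write queue, so the original order is restored without tracking a skb_prev cursor. A minimal userspace sketch of that invariant, using plain arrays instead of sk_buff queues (names are illustrative, not kernel API):

#include <stdio.h>

/* toy deques standing in for ack_queue and write_queue */
static int ack[8]  = {1, 2, 3, 4};   /* oldest..newest un-acked frames */
static int ack_len = 4;
static int out[8];                   /* write queue, head at index 0 */
static int out_len = 0;

static int dequeue_tail(void) { return ack[--ack_len]; } /* newest first */

static void queue_head(int v)
{
	for (int i = out_len; i > 0; i--) /* shift right, insert at head */
		out[i] = out[i - 1];
	out[0] = v;
	out_len++;
}

int main(void)
{
	while (ack_len > 0)               /* same shape as the new loop */
		queue_head(dequeue_tail());
	for (int i = 0; i < out_len; i++) /* prints 1 2 3 4: order kept */
		printf("%d ", out[i]);
	printf("\n");
	return 0;
}
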
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index c3f749abb2d0..c9191871c1e0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -566,11 +566,22 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
 	if (dlc->state == BT_CLOSED) {
 		if (!dev->tty) {
 			if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
-				if (rfcomm_dev_get(dev->id) == NULL)
+				/* Drop DLC lock here to avoid deadlock
+				 * 1. rfcomm_dev_get will take rfcomm_dev_lock
+				 * but in rfcomm_dev_add there's lock order:
+				 * rfcomm_dev_lock -> dlc lock
+				 * 2. rfcomm_dev_put will deadlock if it's
+				 * the last reference
+				 */
+				rfcomm_dlc_unlock(dlc);
+				if (rfcomm_dev_get(dev->id) == NULL) {
+					rfcomm_dlc_lock(dlc);
 					return;
+				}
 
 				rfcomm_dev_del(dev);
 				rfcomm_dev_put(dev);
+				rfcomm_dlc_lock(dlc);
 			}
 		} else
 			tty_hangup(dev->tty);
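
The comment added by the patch spells out the rule being enforced: rfcomm_dev_add() establishes the global order rfcomm_dev_lock -> dlc lock, so this path must not call into rfcomm_dev_get() while still holding the dlc lock. A minimal pthreads sketch of the same drop-and-reacquire pattern (the lock names and helper are illustrative, not the Bluetooth code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER; /* outer lock */
static pthread_mutex_t dlc_lock = PTHREAD_MUTEX_INITIALIZER; /* inner lock */

/* helper that, like rfcomm_dev_get(), needs the outer lock */
static int dev_get(void)
{
	pthread_mutex_lock(&dev_lock);
	/* ... look up the device ... */
	pthread_mutex_unlock(&dev_lock);
	return 1;
}

static void state_change(void)
{
	pthread_mutex_lock(&dlc_lock);
	/* Taking dev_lock here would invert the dev_lock -> dlc_lock
	 * order used elsewhere, so release the inner lock first. */
	pthread_mutex_unlock(&dlc_lock);
	if (dev_get())
		puts("device looked up with locks taken in the global order");
	pthread_mutex_lock(&dlc_lock);  /* reacquire before returning */
	pthread_mutex_unlock(&dlc_lock);
}

int main(void) { state_change(); return 0; }
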
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 5d9d7130bd6e..65f01f71b3f3 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1714,7 +1714,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, nest);
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
 }
 
 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
@@ -2057,9 +2058,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
 			goto nla_put_failure;
 	}
 
-	ci.ndm_used      = now - neigh->used;
-	ci.ndm_confirmed = now - neigh->confirmed;
-	ci.ndm_updated   = now - neigh->updated;
+	ci.ndm_used      = jiffies_to_clock_t(now - neigh->used);
+	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
+	ci.ndm_updated   = jiffies_to_clock_t(now - neigh->updated);
 	ci.ndm_refcnt    = atomic_read(&neigh->refcnt) - 1;
 	read_unlock_bh(&neigh->lock);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index cf857c4dc7b1..a9a77216310e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -498,7 +498,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
 	return nla_nest_end(skb, mx);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, mx);
+	nla_nest_cancel(skb, mx);
+	return -EMSGSIZE;
 }
 
 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
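
This hunk, the neighbour.c one above, and the later genetlink/sched ones are all the same cleanup: nla_nest_cancel() only rolls the partially built message back, so the fill function now reports -EMSGSIZE itself instead of propagating the cancel helper's return value. A compilable userspace sketch of the pattern, with stub types standing in for the netlink API:

#include <errno.h>
#include <stdio.h>

struct msg { int tailroom; int nested; };        /* stand-in for sk_buff */

static int  nest_start(struct msg *m)  { m->nested = 1; return m->tailroom >= 16; }
static void nest_cancel(struct msg *m) { m->nested = 0; } /* rollback only */

static int fill_info(struct msg *m)
{
	if (!nest_start(m))
		goto nla_put_failure;
	/* ... put attributes ... */
	return 0;

nla_put_failure:
	nest_cancel(m);
	return -EMSGSIZE;    /* a real errno, not the old bare -1 */
}

int main(void)
{
	struct msg small = { .tailroom = 8 };
	printf("fill_info() = %d (-EMSGSIZE = %d)\n", fill_info(&small), -EMSGSIZE);
	return 0;
}
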
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5c459f2b7985..1e556d312117 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1445,6 +1445,7 @@ done:
 
 	if (spd.nr_pages) {
 		int ret;
+		struct sock *sk = __skb->sk;
 
 		/*
 		 * Drop the socket lock, otherwise we have reverse
@@ -1455,9 +1456,9 @@ done:
 		 * we call into ->sendpage() with the i_mutex lock held
 		 * and networking will grab the socket lock.
 		 */
-		release_sock(__skb->sk);
+		release_sock(sk);
 		ret = splice_to_pipe(pipe, &spd);
-		lock_sock(__skb->sk);
+		lock_sock(sk);
 		return ret;
 	}
 
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 0ad1cd57bc39..c77aff9c6eb3 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -75,7 +75,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
-		if ((copy = end - offset) > 0) {
+		if (copy > 0) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 			struct page *page = frag->page;
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6848e4760f34..79a7ef6209ff 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -90,7 +90,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 	[IFA_LOCAL]     = { .type = NLA_U32 },
 	[IFA_ADDRESS]   = { .type = NLA_U32 },
 	[IFA_BROADCAST] = { .type = NLA_U32 },
-	[IFA_ANYCAST]   = { .type = NLA_U32 },
 	[IFA_LABEL]     = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
@@ -536,9 +535,6 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
 	if (tb[IFA_BROADCAST])
 		ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);
 
-	if (tb[IFA_ANYCAST])
-		ifa->ifa_anycast = nla_get_be32(tb[IFA_ANYCAST]);
-
 	if (tb[IFA_LABEL])
 		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
 	else
@@ -745,7 +741,6 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
 			break;
 		inet_del_ifa(in_dev, ifap, 0);
 		ifa->ifa_broadcast = 0;
-		ifa->ifa_anycast = 0;
 		ifa->ifa_scope = 0;
 	}
 
@@ -1113,7 +1108,6 @@ static inline size_t inet_nlmsg_size(void)
 	       + nla_total_size(4) /* IFA_ADDRESS */
 	       + nla_total_size(4) /* IFA_LOCAL */
 	       + nla_total_size(4) /* IFA_BROADCAST */
-	       + nla_total_size(4) /* IFA_ANYCAST */
 	       + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
 }
 
@@ -1143,9 +1137,6 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
 	if (ifa->ifa_broadcast)
 		NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
 
-	if (ifa->ifa_anycast)
-		NLA_PUT_BE32(skb, IFA_ANYCAST, ifa->ifa_anycast);
-
 	if (ifa->ifa_label[0])
 		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 0f1557a4ac7a..0b2ac6a3d903 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -506,7 +506,6 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
 	[RTA_PREFSRC]   = { .type = NLA_U32 },
 	[RTA_METRICS]   = { .type = NLA_NESTED },
 	[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
-	[RTA_PROTOINFO] = { .type = NLA_U32 },
 	[RTA_FLOW]      = { .type = NLA_U32 },
 };
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index fead049daf43..e7e091d365ff 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -608,6 +608,14 @@ static void raw_close(struct sock *sk, long timeout)
 	sk_common_release(sk);
 }
 
+static int raw_destroy(struct sock *sk)
+{
+	lock_sock(sk);
+	ip_flush_pending_frames(sk);
+	release_sock(sk);
+	return 0;
+}
+
 /* This gets rid of all the nasties in af_inet. -DaveM */
 static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -820,6 +828,7 @@ struct proto raw_prot = {
 	.name           = "RAW",
 	.owner          = THIS_MODULE,
 	.close          = raw_close,
+	.destroy        = raw_destroy,
 	.connect        = ip4_datagram_connect,
 	.disconnect     = udp_disconnect,
 	.ioctl          = raw_ioctl,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index df41026b60db..96be336064fb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1792,7 +1792,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	if (err)
 		flags |= RTCF_DIRECTSRC;
 
-	if (out_dev == in_dev && err && !(flags & RTCF_MASQ) &&
+	if (out_dev == in_dev && err &&
 	    (IN_DEV_SHARED_MEDIA(out_dev) ||
 	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
 		flags |= RTCF_DOREDIRECT;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f88653138621..ab66683b8043 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1227,7 +1227,14 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 			copied += used;
 			offset += used;
 		}
-		if (offset != skb->len)
+		/*
+		 * If recv_actor drops the lock (e.g. TCP splice
+		 * receive) the skb pointer might be invalid when
+		 * getting here: tcp_collapse might have deleted it
+		 * while aggregating skbs from the socket queue.
+		 */
+		skb = tcp_recv_skb(sk, seq-1, &offset);
+		if (!skb || (offset+1 != skb->len))
 			break;
 	}
 	if (tcp_hdr(skb)->fin) {
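
The fix re-looks up the current skb by sequence number because recv_actor may have released the socket lock, during which tcp_collapse() can free the skb it was handed. The same revalidate-after-unlock discipline, sketched over a plain linked list (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node { int seq; struct node *next; };
static struct node *queue;

/* stand-in for tcp_recv_skb(): re-find the node covering seq */
static struct node *find(int seq)
{
	for (struct node *n = queue; n; n = n->next)
		if (n->seq == seq)
			return n;
	return NULL;
}

/* callback that, like a splice recv_actor, may delete queue entries */
static void actor(int seq)
{
	if (seq == 2) {                  /* simulate tcp_collapse() */
		struct node *dead = queue->next;
		queue->next = dead->next;
		free(dead);
	}
}

int main(void)
{
	for (int i = 3; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));
		n->seq = i; n->next = queue; queue = n;
	}
	for (int seq = 1; seq <= 3; seq++) {
		actor(seq);
		if (!find(seq)) {        /* revalidate, never reuse a stale pointer */
			printf("seq %d gone, stopping\n", seq);
			break;
		}
		printf("seq %d still queued\n", seq);
	}
	return 0;
}
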
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b54d9d37b636..eba873e9b560 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1392,9 +1392,9 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 
 	if (before(next_dup->start_seq, skip_to_seq)) {
 		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
-		tcp_sacktag_walk(skb, sk, NULL,
-				 next_dup->start_seq, next_dup->end_seq,
-				 1, fack_count, reord, flag);
+		skb = tcp_sacktag_walk(skb, sk, NULL,
+				       next_dup->start_seq, next_dup->end_seq,
+				       1, fack_count, reord, flag);
 	}
 
 	return skb;
@@ -2483,6 +2483,20 @@ static inline void tcp_complete_cwr(struct sock *sk)
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+static void tcp_try_keep_open(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int state = TCP_CA_Open;
+
+	if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+		state = TCP_CA_Disorder;
+
+	if (inet_csk(sk)->icsk_ca_state != state) {
+		tcp_set_ca_state(sk, state);
+		tp->high_seq = tp->snd_nxt;
+	}
+}
+
 static void tcp_try_to_open(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2496,15 +2510,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 		tcp_enter_cwr(sk, 1);
 
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
-		int state = TCP_CA_Open;
-
-		if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
-			state = TCP_CA_Disorder;
-
-		if (inet_csk(sk)->icsk_ca_state != state) {
-			tcp_set_ca_state(sk, state);
-			tp->high_seq = tp->snd_nxt;
-		}
+		tcp_try_keep_open(sk);
 		tcp_moderate_cwnd(tp);
 	} else {
 		tcp_cwnd_down(sk, flag);
@@ -3310,8 +3316,11 @@ no_queue:
 	return 1;
 
 old_ack:
-	if (TCP_SKB_CB(skb)->sacked)
+	if (TCP_SKB_CB(skb)->sacked) {
 		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+		if (icsk->icsk_ca_state == TCP_CA_Open)
+			tcp_try_keep_open(sk);
+	}
 
 uninteresting_ack:
 	SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e399bde7813a..ad993ecb4810 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2131,6 +2131,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
+
+	TCP_INC_STATS(TCP_MIB_OUTRSTS);
 }
 
 /* WARNING: This routine must only be called when we have already sent
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index d3b709a6f264..cb1f0e83830b 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -97,7 +97,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
 {
 	struct xfrm_tunnel *handler;
 
-	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		goto drop;
 
 	for (handler = tunnel64_handlers; handler; handler = handler->next)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index db1cb7c96d63..56fcda3694ba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -420,7 +420,7 @@ void udp_err(struct sk_buff *skb, u32 info)
 /*
  * Throw away all pending data and cancel the corking. Socket is locked.
  */
-static void udp_flush_pending_frames(struct sock *sk)
+void udp_flush_pending_frames(struct sock *sk)
 {
 	struct udp_sock *up = udp_sk(sk);
 
@@ -430,6 +430,7 @@ static void udp_flush_pending_frames(struct sock *sk)
 		ip_flush_pending_frames(sk);
 	}
 }
+EXPORT_SYMBOL(udp_flush_pending_frames);
 
 /**
  * 	udp4_hwcsum_outgoing  -  handle outgoing HW checksumming
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3a835578fd1c..147588f4c7c0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -731,8 +731,13 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 				onlink = -1;
 
 				spin_lock(&ifa->lock);
-				lifetime = min_t(unsigned long,
-						 ifa->valid_lft, 0x7fffffffUL/HZ);
+
+				lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
+				/*
+				 * Note: Because this address is
+				 * not permanent, lifetime <
+				 * LONG_MAX / HZ here.
+				 */
 				if (time_before(expires,
 						ifa->tstamp + lifetime * HZ))
 					expires = ifa->tstamp + lifetime * HZ;
@@ -1722,7 +1727,6 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 	__u32 valid_lft;
 	__u32 prefered_lft;
 	int addr_type;
-	unsigned long rt_expires;
 	struct inet6_dev *in6_dev;
 
 	pinfo = (struct prefix_info *) opt;
@@ -1764,28 +1768,23 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 	 *	2) Configure prefixes with the auto flag set
 	 */
 
-	if (valid_lft == INFINITY_LIFE_TIME)
-		rt_expires = ~0UL;
-	else if (valid_lft >= 0x7FFFFFFF/HZ) {
+	if (pinfo->onlink) {
+		struct rt6_info *rt;
+		unsigned long rt_expires;
+
 		/* Avoid arithmetic overflow. Really, we could
 		 * save rt_expires in seconds, likely valid_lft,
 		 * but it would require division in fib gc, that it
 		 * not good.
 		 */
-		rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ);
-	} else
-		rt_expires = valid_lft * HZ;
+		if (HZ > USER_HZ)
+			rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
+		else
+			rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
 
-	/*
-	 * We convert this (in jiffies) to clock_t later.
-	 * Avoid arithmetic overflow there as well.
-	 * Overflow can happen only if HZ < USER_HZ.
-	 */
-	if (HZ < USER_HZ && ~rt_expires && rt_expires > 0x7FFFFFFF / USER_HZ)
-		rt_expires = 0x7FFFFFFF / USER_HZ;
+		if (addrconf_finite_timeout(rt_expires))
+			rt_expires *= HZ;
 
-	if (pinfo->onlink) {
-		struct rt6_info *rt;
 		rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL,
 				dev->ifindex, 1);
 
@@ -1794,7 +1793,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 			if (valid_lft == 0) {
 				ip6_del_rt(rt);
 				rt = NULL;
-			} else if (~rt_expires) {
+			} else if (addrconf_finite_timeout(rt_expires)) {
 				/* not infinity */
 				rt->rt6i_expires = jiffies + rt_expires;
 				rt->rt6i_flags |= RTF_EXPIRES;
@@ -1803,9 +1802,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 				rt->rt6i_expires = 0;
 			}
 		} else if (valid_lft) {
-			int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
 			clock_t expires = 0;
-			if (~rt_expires) {
+			int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
+			if (addrconf_finite_timeout(rt_expires)) {
 				/* not infinity */
 				flags |= RTF_EXPIRES;
 				expires = jiffies_to_clock_t(rt_expires);
@@ -2027,7 +2026,7 @@ err_exit:
  *	Manual configuration of address on an interface
  */
 static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
-			  int plen, __u8 ifa_flags, __u32 prefered_lft,
+			  unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
			  __u32 valid_lft)
 {
 	struct inet6_ifaddr *ifp;
@@ -2036,9 +2035,13 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	int scope;
 	u32 flags;
 	clock_t expires;
+	unsigned long timeout;
 
 	ASSERT_RTNL();
 
+	if (plen > 128)
+		return -EINVAL;
+
 	/* check the lifetime */
 	if (!valid_lft || prefered_lft > valid_lft)
 		return -EINVAL;
@@ -2052,22 +2055,23 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 
 	scope = ipv6_addr_scope(pfx);
 
-	if (valid_lft == INFINITY_LIFE_TIME) {
-		ifa_flags |= IFA_F_PERMANENT;
-		flags = 0;
-		expires = 0;
-	} else {
-		if (valid_lft >= 0x7FFFFFFF/HZ)
-			valid_lft = 0x7FFFFFFF/HZ;
+	timeout = addrconf_timeout_fixup(valid_lft, HZ);
+	if (addrconf_finite_timeout(timeout)) {
+		expires = jiffies_to_clock_t(timeout * HZ);
+		valid_lft = timeout;
 		flags = RTF_EXPIRES;
-		expires = jiffies_to_clock_t(valid_lft * HZ);
+	} else {
+		expires = 0;
+		flags = 0;
+		ifa_flags |= IFA_F_PERMANENT;
 	}
 
-	if (prefered_lft == 0)
-		ifa_flags |= IFA_F_DEPRECATED;
-	else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
-		 (prefered_lft != INFINITY_LIFE_TIME))
-		prefered_lft = 0x7FFFFFFF/HZ;
+	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
+	if (addrconf_finite_timeout(timeout)) {
+		if (timeout == 0)
+			ifa_flags |= IFA_F_DEPRECATED;
+		prefered_lft = timeout;
+	}
 
 	ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
 
@@ -2095,12 +2099,15 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 }
 
 static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
-			  int plen)
+			  unsigned int plen)
 {
 	struct inet6_ifaddr *ifp;
 	struct inet6_dev *idev;
 	struct net_device *dev;
 
+	if (plen > 128)
+		return -EINVAL;
+
 	dev = __dev_get_by_index(net, ifindex);
 	if (!dev)
 		return -ENODEV;
@@ -3169,26 +3176,28 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
 {
 	u32 flags;
 	clock_t expires;
+	unsigned long timeout;
 
 	if (!valid_lft || (prefered_lft > valid_lft))
 		return -EINVAL;
 
-	if (valid_lft == INFINITY_LIFE_TIME) {
-		ifa_flags |= IFA_F_PERMANENT;
-		flags = 0;
-		expires = 0;
-	} else {
-		if (valid_lft >= 0x7FFFFFFF/HZ)
-			valid_lft = 0x7FFFFFFF/HZ;
+	timeout = addrconf_timeout_fixup(valid_lft, HZ);
+	if (addrconf_finite_timeout(timeout)) {
+		expires = jiffies_to_clock_t(timeout * HZ);
+		valid_lft = timeout;
 		flags = RTF_EXPIRES;
-		expires = jiffies_to_clock_t(valid_lft * HZ);
+	} else {
+		expires = 0;
+		flags = 0;
+		ifa_flags |= IFA_F_PERMANENT;
 	}
 
-	if (prefered_lft == 0)
-		ifa_flags |= IFA_F_DEPRECATED;
-	else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
-		 (prefered_lft != INFINITY_LIFE_TIME))
-		prefered_lft = 0x7FFFFFFF/HZ;
+	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
+	if (addrconf_finite_timeout(timeout)) {
+		if (timeout == 0)
+			ifa_flags |= IFA_F_DEPRECATED;
+		prefered_lft = timeout;
+	}
 
 	spin_lock_bh(&ifp->lock);
 	ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
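
All of these hunks (and the net/ipv6/route.c one below) replace open-coded 0x7FFFFFFF/HZ clamping with the addrconf_timeout_fixup()/addrconf_finite_timeout() helpers that this patch series introduces in include/net/addrconf.h. A hedged userspace reimplementation of their arithmetic, to show the overflow-safe clamping and the all-ones "infinite" encoding (exact kernel details may differ):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define INFINITY_LIFE_TIME 0xffffffffU

/* clamp a lifetime (in seconds) so that lifetime * unit never overflows
 * a signed long; 0xffffffff is mapped to the all-ones "infinite" value */
static unsigned long timeout_fixup(uint32_t timeout, unsigned int unit)
{
	if (timeout == INFINITY_LIFE_TIME)
		return ~0UL;
	if ((unsigned long)timeout > (unsigned long)(LONG_MAX / unit))
		return LONG_MAX / unit;
	return timeout;
}

/* all-ones means infinite, so ~timeout is zero (false) exactly then */
static int finite_timeout(unsigned long timeout)
{
	return ~timeout != 0;
}

int main(void)
{
	const unsigned int HZ = 1000;   /* illustrative tick rate */
	uint32_t lifetimes[] = { 600, 0xfffffffe, INFINITY_LIFE_TIME };
	for (int i = 0; i < 3; i++) {
		unsigned long t = timeout_fixup(lifetimes[i], HZ);
		printf("%u -> %lu (finite=%d)\n", lifetimes[i], t, finite_timeout(t));
	}
	return 0;
}
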
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 94fa6ae77cfe..b9c2de84a8a2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -496,7 +496,8 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	return 0;
 }
 
-int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
+int datagram_send_ctl(struct net *net,
+		      struct msghdr *msg, struct flowi *fl,
 		      struct ipv6_txoptions *opt,
 		      int *hlimit, int *tclass)
 {
@@ -509,7 +510,6 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
 
 	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 		int addr_type;
-		struct net_device *dev = NULL;
 
 		if (!CMSG_OK(msg, cmsg)) {
 			err = -EINVAL;
@@ -522,6 +522,9 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
 		switch (cmsg->cmsg_type) {
 		case IPV6_PKTINFO:
 		case IPV6_2292PKTINFO:
+		{
+			struct net_device *dev = NULL;
+
 			if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
 				err = -EINVAL;
 				goto exit_f;
@@ -535,32 +538,32 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl,
 				fl->oif = src_info->ipi6_ifindex;
 			}
 
-			addr_type = ipv6_addr_type(&src_info->ipi6_addr);
+			addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
 
-			if (addr_type == IPV6_ADDR_ANY)
-				break;
+			if (fl->oif) {
+				dev = dev_get_by_index(net, fl->oif);
+				if (!dev)
+					return -ENODEV;
+			} else if (addr_type & IPV6_ADDR_LINKLOCAL)
+				return -EINVAL;
 
-			if (addr_type & IPV6_ADDR_LINKLOCAL) {
-				if (!src_info->ipi6_ifindex)
-					return -EINVAL;
-				else {
-					dev = dev_get_by_index(&init_net, src_info->ipi6_ifindex);
-					if (!dev)
-						return -ENODEV;
-				}
-			}
-			if (!ipv6_chk_addr(&init_net, &src_info->ipi6_addr,
-					   dev, 0)) {
-				if (dev)
-					dev_put(dev);
-				err = -EINVAL;
-				goto exit_f;
+			if (addr_type != IPV6_ADDR_ANY) {
+				int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
+				if (!ipv6_chk_addr(net, &src_info->ipi6_addr,
+						   strict ? dev : NULL, 0))
+					err = -EINVAL;
+				else
+					ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
 			}
+
 			if (dev)
 				dev_put(dev);
 
-			ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
+			if (err)
+				goto exit_f;
+
 			break;
+		}
 
 		case IPV6_FLOWINFO:
 			if (cmsg->cmsg_len < CMSG_LEN(4)) {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index eb7a940310f4..37a4e777e347 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -354,7 +354,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
 		msg.msg_control = (void*)(fl->opt+1);
 		flowi.oif = 0;
 
-		err = datagram_send_ctl(&msg, &flowi, fl->opt, &junk, &junk);
+		err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk);
 		if (err)
 			goto done;
 		err = -EINVAL;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 56d55fecf8ec..26b83e512a09 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -161,9 +161,17 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		struct ipv6_txoptions *opt;
 		struct sk_buff *pktopt;
 
-		if (sk->sk_protocol != IPPROTO_UDP &&
-		    sk->sk_protocol != IPPROTO_UDPLITE &&
-		    sk->sk_protocol != IPPROTO_TCP)
+		if (sk->sk_type == SOCK_RAW)
+			break;
+
+		if (sk->sk_protocol == IPPROTO_UDP ||
+		    sk->sk_protocol == IPPROTO_UDPLITE) {
+			struct udp_sock *up = udp_sk(sk);
+			if (up->pending == AF_INET6) {
+				retv = -EBUSY;
+				break;
+			}
+		} else if (sk->sk_protocol != IPPROTO_TCP)
 			break;
 
 		if (sk->sk_state != TCP_ESTABLISHED) {
@@ -416,7 +424,7 @@ sticky_done:
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void*)(opt+1);
 
-		retv = datagram_send_ctl(&msg, &fl, opt, &junk, &junk);
+		retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk);
 		if (retv)
 			goto done;
 update:
@@ -832,7 +840,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
 	len = min_t(unsigned int, len, ipv6_optlen(hdr));
 	if (copy_to_user(optval, hdr, len))
 		return -EFAULT;
-	return ipv6_optlen(hdr);
+	return len;
 }
 
 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
@@ -975,6 +983,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		len = ipv6_getsockopt_sticky(sk, np->opt,
 					     optname, optval, len);
 		release_sock(sk);
+		/* check if ipv6_getsockopt_sticky() returns err code */
+		if (len < 0)
+			return len;
 		return put_user(len, optlen);
 	}
 
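
Two related getsockopt fixes here: ipv6_getsockopt_sticky() now returns the number of bytes it actually copied (len may have been truncated by the min_t()) rather than the full option length, and the caller checks for a negative error before handing the value to put_user(). A small sketch of why returning the untruncated length misleads callers:

#include <stdio.h>
#include <string.h>

/* stand-in for the sticky-option copy: copies at most len bytes */
static int copy_option(char *dst, size_t len)
{
	const char opt[] = "0123456789abcdef";  /* 16-byte "option" */
	size_t optlen = sizeof(opt) - 1;
	if (len > optlen)
		len = optlen;                    /* min_t(unsigned int, ...) */
	memcpy(dst, opt, len);
	return (int)len;  /* the fix: report copied bytes, not optlen */
}

int main(void)
{
	char buf[8];
	int n = copy_option(buf, sizeof(buf));
	/* a caller trusting the old return value (16) would read past
	 * the 8 bytes that were really written */
	printf("copied %d of requested %zu bytes\n", n, sizeof(buf));
	return 0;
}
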
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 2dccad48058c..e65e26e210ee 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -209,7 +209,9 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 	arg.dst = dst;
 	hash = ip6qhashfn(id, src, dst);
 
+	local_bh_disable();
 	q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+	local_bh_enable();
 	if (q == NULL)
 		goto oom;
 
@@ -638,10 +640,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 		goto ret_orig;
 	}
 
-	spin_lock(&fq->q.lock);
+	spin_lock_bh(&fq->q.lock);
 
 	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
-		spin_unlock(&fq->q.lock);
+		spin_unlock_bh(&fq->q.lock);
 		pr_debug("Can't insert skb to queue\n");
 		fq_put(fq);
 		goto ret_orig;
@@ -653,7 +655,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 		if (ret_skb == NULL)
 			pr_debug("Can't reassemble fragmented packets\n");
 	}
-	spin_unlock(&fq->q.lock);
+	spin_unlock_bh(&fq->q.lock);
 
 	fq_put(fq);
 	return ret_skb;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 232e0dc45bf5..8fee9a15b2d3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -813,7 +813,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
+		err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -1164,6 +1164,14 @@ static void rawv6_close(struct sock *sk, long timeout)
 	sk_common_release(sk);
 }
 
+static int raw6_destroy(struct sock *sk)
+{
+	lock_sock(sk);
+	ip6_flush_pending_frames(sk);
+	release_sock(sk);
+	return 0;
+}
+
 static int rawv6_init_sk(struct sock *sk)
 {
 	struct raw6_sock *rp = raw6_sk(sk);
@@ -1187,6 +1195,7 @@ struct proto rawv6_prot = {
 	.name           = "RAWv6",
 	.owner          = THIS_MODULE,
 	.close          = rawv6_close,
+	.destroy        = raw6_destroy,
 	.connect        = ip6_datagram_connect,
 	.disconnect     = udp_disconnect,
 	.ioctl          = rawv6_ioctl,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 48534c6c0735..220cffe9e63b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -446,7 +446,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 	struct route_info *rinfo = (struct route_info *) opt;
 	struct in6_addr prefix_buf, *prefix;
 	unsigned int pref;
-	u32 lifetime;
+	unsigned long lifetime;
 	struct rt6_info *rt;
 
 	if (len < sizeof(struct route_info)) {
@@ -472,13 +472,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
 		pref = ICMPV6_ROUTER_PREF_MEDIUM;
 
-	lifetime = ntohl(rinfo->lifetime);
-	if (lifetime == 0xffffffff) {
-		/* infinity */
-	} else if (lifetime > 0x7fffffff/HZ - 1) {
-		/* Avoid arithmetic overflow */
-		lifetime = 0x7fffffff/HZ - 1;
-	}
+	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
 
 	if (rinfo->length == 3)
 		prefix = (struct in6_addr *)rinfo->prefix;
@@ -506,7 +500,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 			(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
 
 	if (rt) {
-		if (lifetime == 0xffffffff) {
+		if (!addrconf_finite_timeout(lifetime)) {
 			rt->rt6i_flags &= ~RTF_EXPIRES;
 		} else {
 			rt->rt6i_expires = jiffies + HZ * lifetime;
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 6323921b40be..669f280989c3 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -109,7 +109,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
 {
 	struct xfrm6_tunnel *handler;
 
-	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto drop;
 
 	for (handler = tunnel46_handlers; handler; handler = handler->next)
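
The two tunnel fixes are mirror images: tunnel64_rcv() receives IPv6-in-IPv4, so it must be able to pull an inner IPv6 header (40 bytes), while tunnel46_rcv() receives IPv4-in-IPv6 and needs an inner IPv4 header (20 bytes); each had the sizes swapped. A sketch of the length check, assuming the standard fixed header sizes:

#include <stdio.h>

#define IPV4_HDR_LEN 20   /* sizeof(struct iphdr)   */
#define IPV6_HDR_LEN 40   /* sizeof(struct ipv6hdr) */

/* stand-in for pskb_may_pull(): is the inner header fully present? */
static int may_pull(int payload_len, int need)
{
	return payload_len >= need;
}

int main(void)
{
	int inner_len = 32;   /* bytes available after the outer header */

	/* the IPv6-in-IPv4 path must check the inner IPv6 size ... */
	printf("tunnel64: %s\n",
	       may_pull(inner_len, IPV6_HDR_LEN) ? "accept" : "drop");
	/* ... and IPv4-in-IPv6 the inner IPv4 size */
	printf("tunnel46: %s\n",
	       may_pull(inner_len, IPV4_HDR_LEN) ? "accept" : "drop");
	return 0;
}
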
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1fd784f3e2ec..dd309626ae9a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -534,7 +534,9 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 {
 	struct udp_sock *up = udp_sk(sk);
 
-	if (up->pending) {
+	if (up->pending == AF_INET)
+		udp_flush_pending_frames(sk);
+	else if (up->pending) {
 		up->len = 0;
 		up->pending = 0;
 		ip6_flush_pending_frames(sk);
@@ -731,7 +733,7 @@ do_udp_sendmsg:
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
+		err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -848,12 +850,14 @@ do_append_data:
 		} else {
 			dst_release(dst);
 		}
+		dst = NULL;
 	}
 
 	if (err > 0)
 		err = np->recverr ? net_xmit_errno(err) : 0;
 	release_sock(sk);
 out:
+	dst_release(dst);
 	fl6_sock_release(flowlabel);
 	if (!err)
 		return len;
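
The udpv6_sendmsg() change centralizes the dst_entry release: the pointer is cleared once its reference has been handed off or dropped, and the single dst_release() at the out: label is then harmless for every exit path, since dst_release(NULL) is a no-op. A refcount toy model of the idiom:

#include <stdio.h>

struct dst { int refcnt; };

/* NULL-safe put, like the kernel's dst_release() */
static void dst_release(struct dst *d)
{
	if (d)
		d->refcnt--;
}

int main(void)
{
	struct dst route = { .refcnt = 1 };
	struct dst *dst = &route;

	/* success path: ownership was transferred elsewhere ... */
	dst = NULL;          /* ... so forget it here */

	/* common exit: releases only if we still own a reference */
	dst_release(dst);
	printf("refcnt = %d (no double release)\n", route.refcnt);
	return 0;
}
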
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index ae54b20d0470..3eb5bcc75f99 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1093,11 +1093,6 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
 
 	init_waitqueue_head(&self->query_wait);
 
-	/* Initialise networking socket struct */
-	sock_init_data(sock, sk);	/* Note : set sk->sk_refcnt to 1 */
-	sk->sk_family = PF_IRDA;
-	sk->sk_protocol = protocol;
-
 	switch (sock->type) {
 	case SOCK_STREAM:
 		sock->ops = &irda_stream_ops;
@@ -1124,13 +1119,20 @@ static int irda_create(struct net *net, struct socket *sock, int protocol)
 			self->max_sdu_size_rx = TTP_SAR_UNBOUND;
 			break;
 		default:
+			sk_free(sk);
 			return -ESOCKTNOSUPPORT;
 		}
 		break;
 	default:
+		sk_free(sk);
 		return -ESOCKTNOSUPPORT;
 	}
 
+	/* Initialise networking socket struct */
+	sock_init_data(sock, sk);	/* Note : set sk->sk_refcnt to 1 */
+	sk->sk_family = PF_IRDA;
+	sk->sk_protocol = protocol;
+
 	/* Register as a client with IrLMP */
 	self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
 	self->mask.word = 0xffff;
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 2e89a00df92c..70907f6baac3 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -73,7 +73,8 @@ connlimit_iphash6(const union nf_inet_addr *addr,
 static inline bool already_closed(const struct nf_conn *conn)
 {
 	if (nf_ct_protonum(conn) == IPPROTO_TCP)
-		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT;
+		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
+		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
 	else
 		return 0;
 }
diff --git a/net/netlink/attr.c b/net/netlink/attr.c
index feb326f4a752..47bbf45ae5d7 100644
--- a/net/netlink/attr.c
+++ b/net/netlink/attr.c
@@ -400,13 +400,13 @@ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
  * @attrlen: length of attribute payload
  * @data: head of attribute payload
  *
- * Returns -1 if the tailroom of the skb is insufficient to store
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
  * the attribute header and payload.
  */
 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
 {
 	if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
-		return -1;
+		return -EMSGSIZE;
 
 	__nla_put(skb, attrtype, attrlen, data);
 	return 0;
@@ -418,13 +418,13 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
  * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
- * Returns -1 if the tailroom of the skb is insufficient to store
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
 {
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
-		return -1;
+		return -EMSGSIZE;
 
 	__nla_put_nohdr(skb, attrlen, data);
 	return 0;
@@ -436,13 +436,13 @@ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
  * @attrlen: length of attribute payload
 * @data: head of attribute payload
 *
- * Returns -1 if the tailroom of the skb is insufficient to store
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
 * the attribute payload.
 */
 int nla_append(struct sk_buff *skb, int attrlen, const void *data)
 {
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
-		return -1;
+		return -EMSGSIZE;
 
 	memcpy(skb_put(skb, attrlen), data, attrlen);
 	return 0;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d16929c9b4bc..f5aa23c3e886 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -554,7 +554,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(skb, hdr);
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
 }
 
 static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
@@ -590,7 +591,8 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
-	return genlmsg_cancel(skb, hdr);
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
 }
 
 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 0df911fd67b1..64465bacbe79 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -444,7 +444,8 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -466,7 +467,8 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static const struct Qdisc_class_ops dsmark_class_ops = {
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 3a9d226ff1e4..c89fba56db56 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -582,7 +582,8 @@ append_opt:
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static void gred_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 87293d0db1d7..fdfaa3fcc16d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1360,7 +1360,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
 
  nla_put_failure:
 	nla_nest_cancel(skb, nest);
-	return -1;
+	return -EMSGSIZE;
 }
 
 static int
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3dcd493f4f4a..5c569853b9c0 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -281,7 +281,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
-	return nla_nest_cancel(skb, opts);
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
 }
 
 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index b4cd2b71953f..532634861db1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1203,6 +1203,9 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 	struct list_head *head = &asoc->peer.transport_addr_list;
 	struct list_head *pos;
 
+	if (asoc->peer.transport_count == 1)
+		return;
+
 	/* Find the next transport in a round-robin fashion. */
 	t = asoc->peer.retran_path;
 	pos = &t->transports;
@@ -1217,6 +1220,15 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 
 		t = list_entry(pos, struct sctp_transport, transports);
 
+		/* We have exhausted the list, but didn't find any
+		 * other active transports.  If so, use the next
+		 * transport.
+		 */
+		if (t == asoc->peer.retran_path) {
+			t = next;
+			break;
+		}
+
 		/* Try to find an active transport. */
 
 		if ((t->state == SCTP_ACTIVE) ||
@@ -1229,15 +1241,6 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 			if (!next)
 				next = t;
 		}
-
-		/* We have exhausted the list, but didn't find any
-		 * other active transports.  If so, use the next
-		 * transport.
-		 */
-		if (t == asoc->peer.retran_path) {
-			t = next;
-			break;
-		}
 	}
 
 	asoc->peer.retran_path = t;
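
Moving the wrap-around test ahead of the active-state checks (and bailing out early for single-homed peers) means the walk terminates as soon as it has gone full circle, instead of re-testing the starting transport. A rough sketch of the round-robin selection, with a plain array standing in for the circular transport list (the bookkeeping is simplified relative to the SCTP code):

#include <stdio.h>

struct transport { int active; };

int main(void)
{
	struct transport peers[] = { {0}, {0}, {1}, {0} };
	int count = 4, cur = 0;        /* cur == current retran_path */
	int next = -1, pick = cur;

	if (count == 1)                /* single-homed: nothing to rotate */
		return 0;

	for (int i = (cur + 1) % count; ; i = (i + 1) % count) {
		if (i == cur) {        /* wrapped: fall back to 'next' */
			if (next >= 0)
				pick = next;
			break;
		}
		if (peers[i].active) { /* first active transport wins */
			pick = i;
			break;
		}
		if (next < 0)          /* remember the first candidate */
			next = i;
	}
	printf("retran_path moves from %d to %d\n", cur, pick);
	return 0;
}
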
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e45e44c60635..a2f4d4d51593 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -299,7 +299,8 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 /* Fills in the source address(saddr) based on the destination address(daddr)
  * and asoc's bind address list.
  */
-static void sctp_v6_get_saddr(struct sctp_association *asoc,
+static void sctp_v6_get_saddr(struct sctp_sock *sk,
+			      struct sctp_association *asoc,
 			      struct dst_entry *dst,
 			      union sctp_addr *daddr,
 			      union sctp_addr *saddr)
@@ -318,7 +319,7 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
 	if (!asoc) {
 		ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
 				   &daddr->v6.sin6_addr,
-				   inet6_sk(asoc->base.sk)->srcprefs,
+				   inet6_sk(&sk->inet.sk)->srcprefs,
 				   &saddr->v6.sin6_addr);
 		SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
 				  NIP6(saddr->v6.sin6_addr));
@@ -726,6 +727,11 @@ static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 	seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr));
 }
 
+static void sctp_v6_ecn_capable(struct sock *sk)
+{
+	inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
 /* Initialize a PF_INET6 socket msg_name. */
 static void sctp_inet6_msgname(char *msgname, int *addr_len)
 {
@@ -996,6 +1002,7 @@ static struct sctp_af sctp_af_inet6 = {
 	.skb_iif        = sctp_v6_skb_iif,
 	.is_ce          = sctp_v6_is_ce,
 	.seq_dump_addr  = sctp_v6_seq_dump_addr,
+	.ecn_capable    = sctp_v6_ecn_capable,
 	.net_header_len = sizeof(struct ipv6hdr),
 	.sockaddr_len   = sizeof(struct sockaddr_in6),
 #ifdef CONFIG_COMPAT
diff --git a/net/sctp/output.c b/net/sctp/output.c
index cf4f9fb6819d..6d45bae93b46 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -548,7 +548,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * Note: The works for IPv6 layer checks this bit too later
 	 * in transmission.  See IP6_ECN_flow_xmit().
 	 */
-	INET_ECN_xmit(nskb->sk);
+	(*tp->af_specific->ecn_capable)(nskb->sk);
 
 	/* Set up the IP options.  */
 	/* BUG: not implemented
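
sctp_packet_transmit() used to call the IPv4-oriented INET_ECN_xmit() unconditionally; routing the call through the new af_specific->ecn_capable hook lets the IPv6 side set ECT(0) in its own traffic-class field instead. A minimal sketch of that per-address-family dispatch (the ops structure and bit value here are illustrative, not the SCTP definitions):

#include <stdio.h>

#define ECT_0 0x02   /* illustrative ECT(0) codepoint bit */

struct sock { int tos; int tclass; };
struct af_ops { void (*ecn_capable)(struct sock *sk); };

static void v4_ecn_capable(struct sock *sk) { sk->tos    |= ECT_0; }
static void v6_ecn_capable(struct sock *sk) { sk->tclass |= ECT_0; }

static const struct af_ops v4_ops = { .ecn_capable = v4_ecn_capable };
static const struct af_ops v6_ops = { .ecn_capable = v6_ecn_capable };

static void transmit(const struct af_ops *af, struct sock *sk)
{
	/* each family marks ECN capability in its own header field */
	af->ecn_capable(sk);
}

int main(void)
{
	struct sock s4 = {0}, s6 = {0};
	transmit(&v4_ops, &s4);
	transmit(&v6_ops, &s6);
	printf("v4 tos=%#x, v6 tclass=%#x\n", s4.tos, s6.tclass);
	return 0;
}
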
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 59edfd25a19c..ace6770e9048 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,6 +208,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
208 INIT_LIST_HEAD(&q->sacked); 208 INIT_LIST_HEAD(&q->sacked);
209 INIT_LIST_HEAD(&q->abandoned); 209 INIT_LIST_HEAD(&q->abandoned);
210 210
211 q->fast_rtx = 0;
211 q->outstanding_bytes = 0; 212 q->outstanding_bytes = 0;
212 q->empty = 1; 213 q->empty = 1;
213 q->cork = 0; 214 q->cork = 0;
@@ -500,6 +501,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
500 case SCTP_RTXR_FAST_RTX: 501 case SCTP_RTXR_FAST_RTX:
501 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 502 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
502 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 503 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
504 q->fast_rtx = 1;
503 break; 505 break;
504 case SCTP_RTXR_PMTUD: 506 case SCTP_RTXR_PMTUD:
505 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); 507 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
@@ -518,9 +520,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
518 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 520 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
519 * following the procedures outlined in C1 - C5. 521 * following the procedures outlined in C1 - C5.
520 */ 522 */
521 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 523 if (reason == SCTP_RTXR_T3_RTX)
524 sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
522 525
523 error = sctp_outq_flush(q, /* rtx_timeout */ 1); 526 /* Flush the queues only on timeout, since fast_rtx is only
527 * triggered during sack processing and the queue
528 * will be flushed at the end.
529 */
530 if (reason != SCTP_RTXR_FAST_RTX)
531 error = sctp_outq_flush(q, /* rtx_timeout */ 1);
524 532
525 if (error) 533 if (error)
526 q->asoc->base.sk->sk_err = -error; 534 q->asoc->base.sk->sk_err = -error;
@@ -538,17 +546,23 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
538 int rtx_timeout, int *start_timer) 546 int rtx_timeout, int *start_timer)
539{ 547{
540 struct list_head *lqueue; 548 struct list_head *lqueue;
541 struct list_head *lchunk;
542 struct sctp_transport *transport = pkt->transport; 549 struct sctp_transport *transport = pkt->transport;
543 sctp_xmit_t status; 550 sctp_xmit_t status;
544 struct sctp_chunk *chunk, *chunk1; 551 struct sctp_chunk *chunk, *chunk1;
545 struct sctp_association *asoc; 552 struct sctp_association *asoc;
553 int fast_rtx;
546 int error = 0; 554 int error = 0;
555 int timer = 0;
556 int done = 0;
547 557
548 asoc = q->asoc; 558 asoc = q->asoc;
549 lqueue = &q->retransmit; 559 lqueue = &q->retransmit;
560 fast_rtx = q->fast_rtx;
550 561
551 /* RFC 2960 6.3.3 Handle T3-rtx Expiration 562 /* This loop handles time-out retransmissions, fast retransmissions,
563 * and retransmissions due to opening of whindow.
564 *
565 * RFC 2960 6.3.3 Handle T3-rtx Expiration
552 * 566 *
553 * E3) Determine how many of the earliest (i.e., lowest TSN) 567 * E3) Determine how many of the earliest (i.e., lowest TSN)
554 * outstanding DATA chunks for the address for which the 568 * outstanding DATA chunks for the address for which the
@@ -563,12 +577,12 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
563 * [Just to be painfully clear, if we are retransmitting 577 * [Just to be painfully clear, if we are retransmitting
564 * because a timeout just happened, we should send only ONE 578 * because a timeout just happened, we should send only ONE
565 * packet of retransmitted data.] 579 * packet of retransmitted data.]
580 *
581 * For fast retransmissions we also send only ONE packet. However,
582 * if we are just flushing the queue due to open window, we'll
583 * try to send as much as possible.
566 */ 584 */
567 lchunk = sctp_list_dequeue(lqueue); 585 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
568
569 while (lchunk) {
570 chunk = list_entry(lchunk, struct sctp_chunk,
571 transmitted_list);
572 586
573 /* Make sure that Gap Acked TSNs are not retransmitted. A 587 /* Make sure that Gap Acked TSNs are not retransmitted. A
574 * simple approach is just to move such TSNs out of the 588 * simple approach is just to move such TSNs out of the
@@ -576,58 +590,60 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
576 * next chunk. 590 * next chunk.
577 */ 591 */
578 if (chunk->tsn_gap_acked) { 592 if (chunk->tsn_gap_acked) {
579 list_add_tail(lchunk, &transport->transmitted); 593 list_del(&chunk->transmitted_list);
580 lchunk = sctp_list_dequeue(lqueue); 594 list_add_tail(&chunk->transmitted_list,
595 &transport->transmitted);
581 continue; 596 continue;
582 } 597 }
583 598
599 /* If we are doing fast retransmit, ignore non-fast_retransmit
600 * chunks.
601 */
602 if (fast_rtx && !chunk->fast_retransmit)
603 continue;
604
584 /* Attempt to append this chunk to the packet. */ 605 /* Attempt to append this chunk to the packet. */
585 status = sctp_packet_append_chunk(pkt, chunk); 606 status = sctp_packet_append_chunk(pkt, chunk);
586 607
587 switch (status) { 608 switch (status) {
588 case SCTP_XMIT_PMTU_FULL: 609 case SCTP_XMIT_PMTU_FULL:
589 /* Send this packet. */ 610 /* Send this packet. */
590 if ((error = sctp_packet_transmit(pkt)) == 0) 611 error = sctp_packet_transmit(pkt);
591 *start_timer = 1;
592 612
593 /* If we are retransmitting, we should only 613 /* If we are retransmitting, we should only
594 * send a single packet. 614 * send a single packet.
595 */ 615 */
596 if (rtx_timeout) { 616 if (rtx_timeout || fast_rtx)
597 list_add(lchunk, lqueue); 617 done = 1;
598 lchunk = NULL;
599 }
600 618
601 /* Bundle lchunk in the next round. */ 619 /* Bundle next chunk in the next round. */
602 break; 620 break;
603 621
604 case SCTP_XMIT_RWND_FULL: 622 case SCTP_XMIT_RWND_FULL:
605 /* Send this packet. */ 623 /* Send this packet. */
606 if ((error = sctp_packet_transmit(pkt)) == 0) 624 error = sctp_packet_transmit(pkt);
607 *start_timer = 1;
608 625
609 /* Stop sending DATA as there is no more room 626 /* Stop sending DATA as there is no more room
610 * at the receiver. 627 * at the receiver.
611 */ 628 */
612 list_add(lchunk, lqueue); 629 done = 1;
613 lchunk = NULL;
614 break; 630 break;
615 631
616 case SCTP_XMIT_NAGLE_DELAY: 632 case SCTP_XMIT_NAGLE_DELAY:
617 /* Send this packet. */ 633 /* Send this packet. */
618 if ((error = sctp_packet_transmit(pkt)) == 0) 634 error = sctp_packet_transmit(pkt);
619 *start_timer = 1;
620 635
621 /* Stop sending DATA because of Nagle delay. */ 636 /* Stop sending DATA because of Nagle delay. */
622 list_add(lchunk, lqueue); 637 done = 1;
623 lchunk = NULL;
624 break; 638 break;
625 639
626 default: 640 default:
627 /* The append was successful, so add this chunk to 641 /* The append was successful, so add this chunk to
628 * the transmitted list. 642 * the transmitted list.
629 */ 643 */
630 list_add_tail(lchunk, &transport->transmitted); 644 list_del(&chunk->transmitted_list);
645 list_add_tail(&chunk->transmitted_list,
646 &transport->transmitted);
631 647
632 /* Mark the chunk as ineligible for fast retransmit 648 /* Mark the chunk as ineligible for fast retransmit
633 * after it is retransmitted. 649 * after it is retransmitted.
@@ -635,27 +651,44 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
635 if (chunk->fast_retransmit > 0) 651 if (chunk->fast_retransmit > 0)
636 chunk->fast_retransmit = -1; 652 chunk->fast_retransmit = -1;
637 653
638 *start_timer = 1; 654 /* Force start T3-rtx timer when fast retransmitting
639 q->empty = 0; 655 * the earliest outstanding TSN
656 */
657 if (!timer && fast_rtx &&
658 ntohl(chunk->subh.data_hdr->tsn) ==
659 asoc->ctsn_ack_point + 1)
660 timer = 2;
640 661
641 /* Retrieve a new chunk to bundle. */ 662 q->empty = 0;
642 lchunk = sctp_list_dequeue(lqueue);
643 break; 663 break;
644 } 664 }
645 665
646 /* If we are here due to a retransmit timeout or a fast 666 /* Set the timer if there were no errors */
647 * retransmit and if there are any chunks left in the retransmit 667 if (!error && !timer)
648 * queue that could not fit in the PMTU sized packet, they need 668 timer = 1;
649 * to be marked as ineligible for a subsequent fast retransmit. 669
650 */ 670 if (done)
651 if (rtx_timeout && !lchunk) { 671 break;
652 list_for_each_entry(chunk1, lqueue, transmitted_list) { 672 }
653 if (chunk1->fast_retransmit > 0) 673
654 chunk1->fast_retransmit = -1; 674 /* If we are here due to a retransmit timeout or a fast
655 } 675 * retransmit and if there are any chunks left in the retransmit
676 * queue that could not fit in the PMTU sized packet, they need
677 * to be marked as ineligible for a subsequent fast retransmit.
678 */
679 if (rtx_timeout || fast_rtx) {
680 list_for_each_entry(chunk1, lqueue, transmitted_list) {
681 if (chunk1->fast_retransmit > 0)
682 chunk1->fast_retransmit = -1;
656 } 683 }
657 } 684 }
658 685
686 *start_timer = timer;
687
688 /* Clear fast retransmit hint */
689 if (fast_rtx)
690 q->fast_rtx = 0;
691
659 return error; 692 return error;
660} 693}
661 694
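
The loop rewrite above drops the manual dequeue/requeue in favour of list_for_each_entry_safe(), whose saved next pointer makes it legal to unlink the current entry and move it to the transmitted list mid-walk. A self-contained sketch of the same idiom, using a toy singly linked list in place of <linux/list.h>:

#include <stdio.h>

struct chunk {
    int tsn;
    int gap_acked;
    struct chunk *next;
};

/* Walk 'rtx' with a saved 'next' pointer (the _safe part) so acked
 * chunks can be unlinked and pushed onto 'done' without breaking the
 * iteration, which is the shape of the loop in the hunk above. */
static struct chunk *drain_acked(struct chunk *rtx, struct chunk **done)
{
    struct chunk **pp = &rtx, *c, *n;

    for (c = rtx; c; c = n) {
        n = c->next;            /* saved before any unlink */
        if (c->gap_acked) {
            *pp = n;            /* unlink from the rtx queue */
            c->next = *done;    /* move to the transmitted list */
            *done = c;
            continue;
        }
        pp = &c->next;
    }
    return rtx;
}

int main(void)
{
    struct chunk c3 = { 3, 0, 0 }, c2 = { 2, 1, &c3 }, c1 = { 1, 0, &c2 };
    struct chunk *rtx = &c1, *done = 0;

    rtx = drain_acked(rtx, &done);
    for (struct chunk *c = rtx; c; c = c->next)
        printf("still queued: %d\n", c->tsn);   /* 1, 3 */
    for (struct chunk *c = done; c; c = c->next)
        printf("moved: %d\n", c->tsn);          /* 2 */
    return 0;
}
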
@@ -862,7 +895,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
862 rtx_timeout, &start_timer); 895 rtx_timeout, &start_timer);
863 896
864 if (start_timer) 897 if (start_timer)
865 sctp_transport_reset_timers(transport); 898 sctp_transport_reset_timers(transport,
899 start_timer-1);
866 900
867 /* This can happen on COOKIE-ECHO resend. Only 901 /* This can happen on COOKIE-ECHO resend. Only
868 * one chunk can get bundled with a COOKIE-ECHO. 902 * one chunk can get bundled with a COOKIE-ECHO.
@@ -977,7 +1011,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
977 list_add_tail(&chunk->transmitted_list, 1011 list_add_tail(&chunk->transmitted_list,
978 &transport->transmitted); 1012 &transport->transmitted);
979 1013
980 sctp_transport_reset_timers(transport); 1014 sctp_transport_reset_timers(transport, start_timer-1);
981 1015
982 q->empty = 0; 1016 q->empty = 0;
983 1017
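
The start_timer plumbing in these hunks encodes three states in one int: 0 leaves the T3-rtx timer alone, 1 arms it only if idle, and 2 forces a restart because the earliest outstanding TSN was just fast-retransmitted. The callers translate that with start_timer-1 into the new force argument of sctp_transport_reset_timers(). A small model of the encoding, with a fake timer standing in for mod_timer():

#include <stdio.h>

/* Stand-in for sctp_transport_reset_timers(transport, force). */
static void reset_timers(int force, int *pending)
{
    if (force || !*pending) {
        *pending = 1;
        puts("T3-rtx (re)armed");
    } else {
        puts("T3-rtx already pending, untouched");
    }
}

int main(void)
{
    int pending = 1;
    int start_timer;

    start_timer = 1;            /* ordinary (re)transmission */
    if (start_timer)
        reset_timers(start_timer - 1, &pending);  /* force = 0 */

    start_timer = 2;            /* fast rtx of the earliest TSN */
    if (start_timer)
        reset_timers(start_timer - 1, &pending);  /* force = 1 */

    start_timer = 0;            /* nothing was sent */
    if (start_timer)
        reset_timers(start_timer - 1, &pending);  /* never reached */
    return 0;
}
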
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 0ec234b762c2..b435a193c5df 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -470,11 +470,11 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
470 /* Walk through the bind address list and look for a bind 470 /* Walk through the bind address list and look for a bind
471 * address that matches the source address of the returned dst. 471 * address that matches the source address of the returned dst.
472 */ 472 */
473 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
473 rcu_read_lock(); 474 rcu_read_lock();
474 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 475 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
475 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 476 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
476 continue; 477 continue;
477 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
478 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 478 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
479 goto out_unlock; 479 goto out_unlock;
480 } 480 }
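
The first protocol.c hunk is a simple loop-invariant hoist: sctp_v4_dst_saddr() depends only on the dst entry and the port, so it is now computed once before the RCU walk instead of once per bind address. A toy sketch of the shape of the change (addresses and helpers are made up):

#include <stdio.h>

struct addr { unsigned int ip; unsigned short port; };

static void dst_saddr(struct addr *out, unsigned int dst_src_ip,
                      unsigned short port)
{
    out->ip = dst_src_ip;   /* stand-in for reading the route's source */
    out->port = port;
}

int main(void)
{
    struct addr bind_list[] = { { 0x0a000001, 0 }, { 0x0a000002, 0 } };
    struct addr dsa;
    size_t i;

    dst_saddr(&dsa, 0x0a000002, 8080);  /* hoisted: computed once */
    for (i = 0; i < 2; i++) {
        if (bind_list[i].ip == dsa.ip) {
            puts("source address matches a bound address");
            break;
        }
    }
    return 0;
}
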
@@ -519,7 +519,8 @@ out:
519/* For v4, the source address is cached in the route entry(dst). So no need 519/* For v4, the source address is cached in the route entry(dst). So no need
520 * to cache it separately and hence this is an empty routine. 520 * to cache it separately and hence this is an empty routine.
521 */ 521 */
522static void sctp_v4_get_saddr(struct sctp_association *asoc, 522static void sctp_v4_get_saddr(struct sctp_sock *sk,
523 struct sctp_association *asoc,
523 struct dst_entry *dst, 524 struct dst_entry *dst,
524 union sctp_addr *daddr, 525 union sctp_addr *daddr,
525 union sctp_addr *saddr) 526 union sctp_addr *saddr)
@@ -616,6 +617,11 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
616 seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr)); 617 seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
617} 618}
618 619
620static void sctp_v4_ecn_capable(struct sock *sk)
621{
622 INET_ECN_xmit(sk);
623}
624
619/* Event handler for inet address addition/deletion events. 625/* Event handler for inet address addition/deletion events.
620 * The sctp_local_addr_list needs to be protected by a spin lock since 626 * The sctp_local_addr_list needs to be protected by a spin lock since
621 * multiple notifiers (say IPv4 and IPv6) may be running at the same 627 * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -934,6 +940,7 @@ static struct sctp_af sctp_af_inet = {
934 .skb_iif = sctp_v4_skb_iif, 940 .skb_iif = sctp_v4_skb_iif,
935 .is_ce = sctp_v4_is_ce, 941 .is_ce = sctp_v4_is_ce,
936 .seq_dump_addr = sctp_v4_seq_dump_addr, 942 .seq_dump_addr = sctp_v4_seq_dump_addr,
943 .ecn_capable = sctp_v4_ecn_capable,
937 .net_header_len = sizeof(struct iphdr), 944 .net_header_len = sizeof(struct iphdr),
938 .sockaddr_len = sizeof(struct sockaddr_in), 945 .sockaddr_len = sizeof(struct sockaddr_in),
939#ifdef CONFIG_COMPAT 946#ifdef CONFIG_COMPAT
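
Because struct sctp_af is a per-family method table, registering .ecn_capable for IPv4 implies that every other registered family needs a matching handler; the IPv6 side would presumably gain an sctp_v6_ecn_capable in a companion hunk, which is not shown here. A sketch of such a table, with both entries hypothetical:

#include <stdio.h>

struct demo_sock { int flags; };

/* Stand-in for struct sctp_af: every family fills every slot, since
 * call sites invoke the hooks unconditionally. */
struct demo_af {
    const char *name;
    void (*ecn_capable)(struct demo_sock *sk);
};

static void v4_ecn(struct demo_sock *sk) { sk->flags |= 1; }
static void v6_ecn(struct demo_sock *sk) { sk->flags |= 2; }

static struct demo_af families[] = {
    { .name = "inet",  .ecn_capable = v4_ecn },
    { .name = "inet6", .ecn_capable = v6_ecn },  /* counterpart assumed */
};

int main(void)
{
    struct demo_sock sk = { 0 };
    size_t i;

    for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
        families[i].ecn_capable(&sk);
        printf("%s -> flags %#x\n", families[i].name, sk.flags);
    }
    return 0;
}
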
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index f4938f6c5abe..3f34f61221ec 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -79,6 +79,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
79 peer->rttvar = 0; 79 peer->rttvar = 0;
80 peer->srtt = 0; 80 peer->srtt = 0;
81 peer->rto_pending = 0; 81 peer->rto_pending = 0;
82 peer->fast_recovery = 0;
82 83
83 peer->last_time_heard = jiffies; 84 peer->last_time_heard = jiffies;
84 peer->last_time_used = jiffies; 85 peer->last_time_used = jiffies;
@@ -190,7 +191,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
190/* Start T3_rtx timer if it is not already running and update the heartbeat 191/* Start T3_rtx timer if it is not already running and update the heartbeat
191 * timer. This routine is called every time a DATA chunk is sent. 192 * timer. This routine is called every time a DATA chunk is sent.
192 */ 193 */
193void sctp_transport_reset_timers(struct sctp_transport *transport) 194void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
194{ 195{
195 /* RFC 2960 6.3.2 Retransmission Timer Rules 196 /* RFC 2960 6.3.2 Retransmission Timer Rules
196 * 197 *
@@ -200,7 +201,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
200 * address. 201 * address.
201 */ 202 */
202 203
203 if (!timer_pending(&transport->T3_rtx_timer)) 204 if (force || !timer_pending(&transport->T3_rtx_timer))
204 if (!mod_timer(&transport->T3_rtx_timer, 205 if (!mod_timer(&transport->T3_rtx_timer,
205 jiffies + transport->rto)) 206 jiffies + transport->rto))
206 sctp_transport_hold(transport); 207 sctp_transport_hold(transport);
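
One subtlety in the force path above: the transport reference is taken only when mod_timer() reports that the timer was previously idle (it returns 0 in that case), so a forced restart of an already-pending timer merely moves the expiry and does not take an extra hold. A user-space model of that rule:

#include <stdio.h>

static int refs = 1;    /* stand-in for the transport refcount */
static int pending;     /* stand-in for timer_pending()        */

/* Fake mod_timer(): returns 1 if the timer was already pending,
 * 0 if it was idle, the same contract as the kernel's. */
static int fake_mod_timer(void)
{
    int was_pending = pending;

    pending = 1;
    return was_pending;
}

static void reset_timers(int force)
{
    if (force || !pending)
        if (!fake_mod_timer())
            refs++;     /* hold only on the idle->armed edge */
}

int main(void)
{
    reset_timers(0);    /* arms the timer, takes a reference   */
    reset_timers(1);    /* forced restart: no extra reference  */
    printf("refs = %d, pending = %d\n", refs, pending);  /* 2, 1 */
    return 0;
}
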
@@ -291,7 +292,7 @@ void sctp_transport_route(struct sctp_transport *transport,
291 if (saddr) 292 if (saddr)
292 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); 293 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
293 else 294 else
294 af->get_saddr(asoc, dst, daddr, &transport->saddr); 295 af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);
295 296
296 transport->dst = dst; 297 transport->dst = dst;
297 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { 298 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
@@ -403,11 +404,16 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
403 cwnd = transport->cwnd; 404 cwnd = transport->cwnd;
404 flight_size = transport->flight_size; 405 flight_size = transport->flight_size;
405 406
407 /* See if we need to exit Fast Recovery first */
408 if (transport->fast_recovery &&
409 TSN_lte(transport->fast_recovery_exit, sack_ctsn))
410 transport->fast_recovery = 0;
411
406 /* The appropriate cwnd increase algorithm is performed if, and only 412 /* The appropriate cwnd increase algorithm is performed if, and only
407 * if the cumulative TSN has advanced and the congestion window is 413 * if the cumulative TSN has advanced and the congestion window is
408 * being fully utilized. 414 * being fully utilized.
409 */ 415 */
410 if ((transport->asoc->ctsn_ack_point >= sack_ctsn) || 416 if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
411 (flight_size < cwnd)) 417 (flight_size < cwnd))
412 return; 418 return;
413 419
@@ -416,17 +422,23 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
416 pmtu = transport->asoc->pathmtu; 422 pmtu = transport->asoc->pathmtu;
417 423
418 if (cwnd <= ssthresh) { 424 if (cwnd <= ssthresh) {
419 /* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less 425 /* RFC 4960 7.2.1
420 * than or equal to ssthresh an SCTP endpoint MUST use the 426 * o When cwnd is less than or equal to ssthresh, an SCTP
421 * slow start algorithm to increase cwnd only if the current 427 * endpoint MUST use the slow-start algorithm to increase
422 * congestion window is being fully utilized and an incoming 428 * cwnd only if the current congestion window is being fully
423 * SACK advances the Cumulative TSN Ack Point. Only when these 429 * utilized, an incoming SACK advances the Cumulative TSN
424 * two conditions are met can the cwnd be increased otherwise 430 * Ack Point, and the data sender is not in Fast Recovery.
425 * the cwnd MUST not be increased. If these conditions are met 431 * Only when these three conditions are met can the cwnd be
426 * then cwnd MUST be increased by at most the lesser of 432 * increased; otherwise, the cwnd MUST not be increased.
427 * 1) the total size of the previously outstanding DATA 433 * If these conditions are met, then cwnd MUST be increased
428 * chunk(s) acknowledged, and 2) the destination's path MTU. 434 * by, at most, the lesser of 1) the total size of the
435 * previously outstanding DATA chunk(s) acknowledged, and
436 * 2) the destination's path MTU. This upper bound protects
437 * against the ACK-Splitting attack outlined in [SAVAGE99].
429 */ 438 */
439 if (transport->fast_recovery)
440 return;
441
430 if (bytes_acked > pmtu) 442 if (bytes_acked > pmtu)
431 cwnd += pmtu; 443 cwnd += pmtu;
432 else 444 else
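
The comparison fix in this hunk deserves a standalone demonstration: a plain '>=' on 32-bit TSNs gives the wrong answer once the TSN space wraps, while the serial-number compare behind TSN_lte() stays correct. tsn_lt()/tsn_lte() below mirror the kernel macros:

#include <stdio.h>
#include <stdint.h>

/* Serial-number arithmetic (RFC 1982 style), correct across 32-bit
 * TSN wraparound, unlike the plain compare this hunk removes. */
static int tsn_lt(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

static int tsn_lte(uint32_t a, uint32_t b)
{
    return a == b || tsn_lt(a, b);
}

int main(void)
{
    uint32_t ctsn_ack_point = 0xfffffffe;  /* just before wrap */
    uint32_t sack_ctsn      = 0x00000001;  /* just after wrap  */

    /* plain compare wrongly says the cumulative TSN did not advance: */
    printf("plain:   %d\n", ctsn_ack_point >= sack_ctsn);          /* 1 */
    /* serial compare correctly sees the advance: */
    printf("tsn_lte: %d\n", tsn_lte(sack_ctsn, ctsn_ack_point));   /* 0 */
    return 0;
}
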
@@ -502,6 +514,13 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
502 * cwnd = ssthresh 514 * cwnd = ssthresh
503 * partial_bytes_acked = 0 515 * partial_bytes_acked = 0
504 */ 516 */
517 if (transport->fast_recovery)
518 return;
519
520 /* Mark Fast Recovery */
521 transport->fast_recovery = 1;
522 transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
523
505 transport->ssthresh = max(transport->cwnd/2, 524 transport->ssthresh = max(transport->cwnd/2,
506 4*transport->asoc->pathmtu); 525 4*transport->asoc->pathmtu);
507 transport->cwnd = transport->ssthresh; 526 transport->cwnd = transport->ssthresh;
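
Concretely, the guard added here makes the multiplicative decrease fire once per loss event: while fast_recovery is set, further FAST_RTX indications leave ssthresh and cwnd untouched, and the raise-cwnd path shown earlier clears the flag once the cumulative ack passes fast_recovery_exit. A toy model with plain integers:

#include <stdio.h>

struct tp {
    unsigned int cwnd, ssthresh, pmtu;
    int fast_recovery;
    unsigned int fast_recovery_exit, next_tsn;
};

static unsigned int max_u(unsigned int a, unsigned int b)
{
    return a > b ? a : b;
}

static void lower_cwnd_fast_rtx(struct tp *t)
{
    if (t->fast_recovery)
        return;             /* already in an episode: no-op */
    t->fast_recovery = 1;
    t->fast_recovery_exit = t->next_tsn - 1;
    t->ssthresh = max_u(t->cwnd / 2, 4 * t->pmtu);
    t->cwnd = t->ssthresh;
}

int main(void)
{
    struct tp t = { .cwnd = 40000, .pmtu = 1500, .next_tsn = 100 };

    lower_cwnd_fast_rtx(&t);    /* cwnd -> 20000 */
    lower_cwnd_fast_rtx(&t);    /* suppressed    */
    printf("cwnd=%u ssthresh=%u exit=%u\n",
           t.cwnd, t.ssthresh, t.fast_recovery_exit);
    return 0;
}
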
@@ -586,6 +605,7 @@ void sctp_transport_reset(struct sctp_transport *t)
586 t->flight_size = 0; 605 t->flight_size = 0;
587 t->error_count = 0; 606 t->error_count = 0;
588 t->rto_pending = 0; 607 t->rto_pending = 0;
608 t->fast_recovery = 0;
589 609
590 /* Initialize the state information for SFR-CACC */ 610 /* Initialize the state information for SFR-CACC */
591 t->cacc.changeover_active = 0; 611 t->cacc.changeover_active = 0;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2bdd4dddc0e1..fb75f265b39c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -187,7 +187,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
187 return genlmsg_end(msg, hdr); 187 return genlmsg_end(msg, hdr);
188 188
189 nla_put_failure: 189 nla_put_failure:
190 return genlmsg_cancel(msg, hdr); 190 genlmsg_cancel(msg, hdr);
191 return -EMSGSIZE;
191} 192}
192 193
193static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) 194static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
@@ -273,7 +274,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
273 return genlmsg_end(msg, hdr); 274 return genlmsg_end(msg, hdr);
274 275
275 nla_put_failure: 276 nla_put_failure:
276 return genlmsg_cancel(msg, hdr); 277 genlmsg_cancel(msg, hdr);
278 return -EMSGSIZE;
277} 279}
278 280
279static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) 281static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb)
@@ -928,7 +930,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
928 return genlmsg_end(msg, hdr); 930 return genlmsg_end(msg, hdr);
929 931
930 nla_put_failure: 932 nla_put_failure:
931 return genlmsg_cancel(msg, hdr); 933 genlmsg_cancel(msg, hdr);
934 return -EMSGSIZE;
932} 935}
933 936
934static int nl80211_dump_station(struct sk_buff *skb, 937static int nl80211_dump_station(struct sk_buff *skb,
@@ -1267,7 +1270,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
1267 return genlmsg_end(msg, hdr); 1270 return genlmsg_end(msg, hdr);
1268 1271
1269 nla_put_failure: 1272 nla_put_failure:
1270 return genlmsg_cancel(msg, hdr); 1273 genlmsg_cancel(msg, hdr);
1274 return -EMSGSIZE;
1271} 1275}
1272 1276
1273static int nl80211_dump_mpath(struct sk_buff *skb, 1277static int nl80211_dump_mpath(struct sk_buff *skb,
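
All four nl80211 hunks apply the same convention fix: genlmsg_cancel()'s return value is not a meaningful error code for a fill function, so the failure path now cancels the half-built message and returns -EMSGSIZE explicitly, which netlink dump callbacks treat as "buffer full, resume in the next skb". The shape of the pattern, with stub helpers standing in for the real netlink API:

#include <stdio.h>
#include <errno.h>

struct msg { int len, cap, start; };

static int put_attr(struct msg *m, int len)
{
    if (m->len + len > m->cap)
        return -1;          /* attribute does not fit */
    m->len += len;
    return 0;
}

static void cancel(struct msg *m) { m->len = m->start; }    /* undo */

/* Shape of the fixed fill function. */
static int fill(struct msg *m)
{
    m->start = m->len;
    if (put_attr(m, 64) < 0)
        goto nla_put_failure;
    if (put_attr(m, 128) < 0)
        goto nla_put_failure;
    return 0;

nla_put_failure:
    cancel(m);
    return -EMSGSIZE;   /* explicit, regardless of cancel()'s result */
}

int main(void)
{
    struct msg m = { 0, 100, 0 };

    printf("fill -> %d, len %d\n", fill(&m), m.len);  /* -EMSGSIZE, 0 */
    return 0;
}
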
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index ac765dd9c7f5..23a2cc04b8cd 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -200,8 +200,8 @@ static struct xfrm_algo_desc aalg_list[] = {
200 } 200 }
201}, 201},
202{ 202{
203 .name = "hmac(ripemd160)", 203 .name = "hmac(rmd160)",
204 .compat = "ripemd160", 204 .compat = "rmd160",
205 205
206 .uinfo = { 206 .uinfo = {
207 .auth = { 207 .auth = {
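
The rename matters because xfrm resolves authentication algorithms by exact string match against the names the crypto layer registers, and the kernel registers RIPEMD-160 as "rmd160", so "hmac(ripemd160)" could never be instantiated. A minimal model of that name-keyed lookup (table contents are illustrative):

#include <stdio.h>
#include <string.h>

struct algo { const char *name; };

static const struct algo algos[] = {
    { "hmac(md5)" }, { "hmac(sha1)" }, { "hmac(rmd160)" },
};

static const struct algo *find_algo(const char *name)
{
    size_t i;

    for (i = 0; i < sizeof(algos) / sizeof(algos[0]); i++)
        if (!strcmp(algos[i].name, name))
            return &algos[i];
    return NULL;        /* exact match or nothing */
}

int main(void)
{
    printf("%p\n", (void *)find_algo("hmac(ripemd160)"));  /* NULL   */
    printf("%s\n", find_algo("hmac(rmd160)")->name);       /* found  */
    return 0;
}
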