Diffstat (limited to 'net/ipv4')
 net/ipv4/icmp.c                    | 24
 net/ipv4/ip_sockglue.c             |  2
 net/ipv4/netfilter/ipt_CLUSTERIP.c |  4
 net/ipv4/netfilter/nf_nat_core.c   |  2
 net/ipv4/tcp_input.c               | 65
 net/ipv4/tcp_output.c              |  3
 6 files changed, 70 insertions(+), 30 deletions(-)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index efc7cbe759c9..3e14d9cd29b3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -578,7 +578,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 
 		if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
-			goto ende;
+			goto relookup_failed;
 
 		if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
 			err = __ip_route_output_key(net, &rt2, &fl);
@@ -588,7 +588,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 
 			fl2.fl4_dst = fl.fl4_src;
 			if (ip_route_output_key(net, &rt2, &fl2))
-				goto ende;
+				goto relookup_failed;
 
 			/* Ugh! */
 			odst = skb_in->dst;
@@ -601,21 +601,23 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 
 		if (err)
-			goto ende;
+			goto relookup_failed;
 
 		err = xfrm_lookup((struct dst_entry **)&rt2, &fl, NULL,
 				  XFRM_LOOKUP_ICMP);
-		if (err == -ENOENT) {
+		switch (err) {
+		case 0:
+			dst_release(&rt->u.dst);
+			rt = rt2;
+			break;
+		case -EPERM:
+			goto ende;
+		default:
+relookup_failed:
 			if (!rt)
 				goto out_unlock;
-			goto route_done;
+			break;
 		}
-
-		dst_release(&rt->u.dst);
-		rt = rt2;
-
-		if (err)
-			goto out_unlock;
 	}
 
 route_done:
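
Taken together, the icmp.c hunks rename the host-relookup failure path from "ende" to "relookup_failed" and rework the handling of the second xfrm_lookup() result into a switch: success adopts the freshly looked-up route, -EPERM aborts the ICMP send altogether, and any other error falls through to relookup_failed, which keeps the original route if one exists. A standalone sketch of that dispatch pattern (hypothetical pick_route(), ordinary userspace C, not the kernel code):

#include <stdio.h>
#include <errno.h>

struct route { const char *name; };

/* Dispatch on a relookup result the way the patched error handling does:
 * success adopts the new route, -EPERM gives up entirely, anything else
 * falls back to the original route (which may itself be absent). */
static struct route *pick_route(struct route *rt, struct route *rt2, int err)
{
	switch (err) {
	case 0:			/* relookup succeeded: prefer the new route */
		return rt2;
	case -EPERM:		/* policy forbids the reply: abort the send */
		return NULL;
	default:		/* relookup failed: keep the old route, if any */
		return rt;
	}
}

int main(void)
{
	struct route old = { "old" }, fresh = { "fresh" };

	printf("%s\n", pick_route(&old, &fresh, 0)->name);	  /* fresh */
	printf("%s\n", pick_route(&old, &fresh, -ENOENT)->name); /* old */
	printf("%p\n", (void *)pick_route(&old, &fresh, -EPERM)); /* (nil) */
	return 0;
}

The point of the shape is that the failure label sits on the default arm, so every earlier goto relookup_failed lands in exactly the same fallback logic as a hard error returned by xfrm_lookup() itself.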
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d6e76f5229cc..d8adfd4972e2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1133,7 +1133,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 	}
 	release_sock(sk);
 
-	if (len < sizeof(int) && len > 0 && val>=0 && val<255) {
+	if (len < sizeof(int) && len > 0 && val>=0 && val<=255) {
 		unsigned char ucval = (unsigned char)val;
 		len = 1;
 		if (put_user(len, optlen))
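
The ip_sockglue.c change is a one-character boundary fix in do_ip_getsockopt(): 255 fits in an unsigned char, so the old strict `val < 255` test wrongly refused the one-byte result path for a value of exactly 255 (e.g. a TTL of 255). A minimal demonstration (plain C, outside the kernel):

#include <stdio.h>

int main(void)
{
	int val = 255;

	printf("old test: %d\n", val >= 0 && val < 255);  /* 0: rejected */
	printf("new test: %d\n", val >= 0 && val <= 255); /* 1: accepted */
	printf("as uchar: %u\n", (unsigned char)val);     /* 255, no loss */
	return 0;
}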
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 965b08a7d738..380d8daac72b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -82,8 +82,8 @@ clusterip_config_put(struct clusterip_config *c)
 static inline void
 clusterip_config_entry_put(struct clusterip_config *c)
 {
+	write_lock_bh(&clusterip_lock);
 	if (atomic_dec_and_test(&c->entries)) {
-		write_lock_bh(&clusterip_lock);
 		list_del(&c->list);
 		write_unlock_bh(&clusterip_lock);
 
@@ -96,7 +96,9 @@ clusterip_config_entry_put(struct clusterip_config *c)
 #ifdef CONFIG_PROC_FS
 	remove_proc_entry(c->pde->name, c->pde->parent);
 #endif
+		return;
 	}
+	write_unlock_bh(&clusterip_lock);
 }
 
 static struct clusterip_config *
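
The CLUSTERIP hunks close a refcount/list race: previously the entry count could drop to zero outside clusterip_lock, leaving a window in which another CPU could look the config up on the list (and take a new reference) just before list_del(). Taking the write lock before atomic_dec_and_test() makes the drop to zero and the unlink one atomic step with respect to lookups. A userspace sketch of the pattern, with a pthread mutex standing in for clusterip_lock and a plain counter for the kernel atomic (illustrative only, not the kernel code):

#include <pthread.h>

struct config {
	int entries;			/* stands in for the kernel's atomic_t */
	struct config *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* clusterip_lock */
static struct config *config_list;

static void config_entry_put(struct config *c)
{
	/* Lock first, as in the patch: concurrent lookups must never see the
	 * count at zero while the entry is still reachable on the list. */
	pthread_mutex_lock(&list_lock);
	if (--c->entries == 0) {
		struct config **p = &config_list; /* singly linked, for brevity */

		while (*p && *p != c)
			p = &(*p)->next;
		if (*p)
			*p = c->next;
		pthread_mutex_unlock(&list_lock);
		/* ...free resources outside the lock, then return early,
		 * mirroring the patched control flow... */
		return;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct config c = { .entries = 2, .next = 0 };

	config_list = &c;
	config_entry_put(&c);		/* 2 -> 1: stays on the list */
	config_entry_put(&c);		/* 1 -> 0: unlinked */
	return config_list != 0;	/* exits 0 once c is gone */
}

As in the kernel change, the zero path unlocks and returns early, so the trailing unlock only runs when the entry survives.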
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 0d5fa3a54d04..36b4e3bb056f 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -629,6 +629,8 @@ static int __init nf_nat_init(void)
 	size_t i;
 	int ret;
 
+	need_ipv4_conntrack();
+
 	ret = nf_ct_extend_register(&nat_extend);
 	if (ret < 0) {
 		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
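
The nf_nat_core.c hunk adds a single call at the top of nf_nat_init(), ahead of any registration work. As with netfilter's other need_*() stubs, need_ipv4_conntrack() appears to exist purely to create a symbol dependency on the IPv4 connection-tracking module, so that loading NAT reliably pulls conntrack in first.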
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 743611956045..bd0ee8ca8b21 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1625,13 +1625,11 @@ out:
 	return flag;
 }
 
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
+/* Limits sacked_out so that sum with lost_out isn't ever larger than
+ * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
  */
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+int tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	u32 holes;
 
 	holes = max(tp->lost_out, 1U);
@@ -1639,8 +1637,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		tcp_update_reordering(sk, tp->packets_out + addend, 0);
+		return 1;
 	}
+	return 0;
+}
+
+/* If we receive more dupacks than we expected counting segments
+ * in assumption of absent reordering, interpret this as reordering.
+ * The only another reason could be bug in receiver TCP.
+ */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	if (tcp_limit_reno_sacked(tp))
+		tcp_update_reordering(sk, tp->packets_out + addend, 0);
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
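
The first two tcp_input.c hunks split the invariant enforcement out of tcp_check_reno_reordering() into a reusable, non-static tcp_limit_reno_sacked(), so other call sites (see the tcp_output.c hunk below) can re-clamp sacked_out without also updating the reordering statistics. SACKed and lost segments are disjoint subsets of the packets in flight, so sacked_out + lost_out must never exceed packets_out; the helper clamps sacked_out and reports whether it had to. A standalone model of that invariant (stub struct with plain unsigned fields, not the kernel tcp_sock):

#include <stdio.h>

struct tp {
	unsigned int packets_out, sacked_out, lost_out;
};

/* Clamp sacked_out so sacked_out + lost_out <= packets_out holds;
 * return nonzero when an adjustment was needed, which the caller
 * interprets as evidence of reordering. */
static int limit_reno_sacked(struct tp *tp)
{
	unsigned int holes = tp->lost_out > 1 ? tp->lost_out : 1;

	if (tp->sacked_out + holes > tp->packets_out) {
		tp->sacked_out = tp->packets_out - holes;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct tp tp = { .packets_out = 10, .sacked_out = 9, .lost_out = 3 };

	/* 9 + 3 > 10, so sacked_out is clamped to 10 - 3 = 7 */
	printf("adjusted=%d sacked_out=%u\n",
	       limit_reno_sacked(&tp), tp.sacked_out);
	return 0;
}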
@@ -1681,11 +1691,16 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 int tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 
 	if (!sysctl_tcp_frto)
 		return 0;
 
+	/* MTU probe and F-RTO won't really play nicely along currently */
+	if (icsk->icsk_mtup.probe_size)
+		return 0;
+
 	if (IsSackFrto())
 		return 1;
 
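
The tcp_use_frto() hunk adds an early out: while an MTU probe is in flight (icsk_mtup.probe_size nonzero), F-RTO is not used for that retransmission timeout, per the in-line comment that the two mechanisms do not currently play nicely together.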
@@ -2134,11 +2149,13 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 /* Mark head of queue up as lost. With RFC3517 SACK, the packets is
  * is against sacked "cnt", otherwise it's against facked "cnt"
  */
-static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
+static void tcp_mark_head_lost(struct sock *sk, int packets)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int cnt;
+	int cnt, oldcnt;
+	int err;
+	unsigned int mss;
 
 	BUG_TRAP(packets <= tp->packets_out);
 	if (tp->lost_skb_hint) {
@@ -2157,13 +2174,25 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
 		tp->lost_skb_hint = skb;
 		tp->lost_cnt_hint = cnt;
 
+		if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+			break;
+
+		oldcnt = cnt;
 		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
 		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 			cnt += tcp_skb_pcount(skb);
 
-		if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) ||
-		     after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
-			break;
+		if (cnt > packets) {
+			if (tcp_is_sack(tp) || (oldcnt >= packets))
+				break;
+
+			mss = skb_shinfo(skb)->gso_size;
+			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
+			if (err < 0)
+				break;
+			cnt = packets;
+		}
+
 		if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
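
The reworked loop in tcp_mark_head_lost() may now land in the middle of an skb: with GSO, a single skb carries several segments, and when only `packets` segments may be marked lost, an skb straddling that boundary is split with tcp_fragment() so exactly the needed number is marked and the tail is left alone. A worked example of the split arithmetic (plain integers standing in for skbs, not the kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1000;  /* gso_size of the straddling skb */
	unsigned int packets = 5; /* segments we are allowed to mark lost */
	unsigned int oldcnt = 3;  /* segments counted before this skb */
	unsigned int pcount = 4;  /* segments carried by this skb */

	/* Split point: only the first (packets - oldcnt) segments of the
	 * skb are still needed to reach the quota. */
	unsigned int split = (packets - oldcnt) * mss;

	printf("split at %u bytes: %u segments marked, %u left unmarked\n",
	       split, packets - oldcnt, pcount - (packets - oldcnt));
	/* prints: split at 2000 bytes: 2 segments marked, 2 left unmarked */
	return 0;
}

If tcp_fragment() fails, the loop simply stops early, which under-marks rather than over-marks losses.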
@@ -2180,17 +2209,17 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_is_reno(tp)) {
-		tcp_mark_head_lost(sk, 1, fast_rexmit);
+		tcp_mark_head_lost(sk, 1);
 	} else if (tcp_is_fack(tp)) {
 		int lost = tp->fackets_out - tp->reordering;
 		if (lost <= 0)
 			lost = 1;
-		tcp_mark_head_lost(sk, lost, fast_rexmit);
+		tcp_mark_head_lost(sk, lost);
 	} else {
 		int sacked_upto = tp->sacked_out - tp->reordering;
-		if (sacked_upto < 0)
-			sacked_upto = 0;
-		tcp_mark_head_lost(sk, sacked_upto, fast_rexmit);
+		if (sacked_upto < fast_rexmit)
+			sacked_upto = fast_rexmit;
+		tcp_mark_head_lost(sk, sacked_upto);
 	}
 
 	/* New heuristics: it is possible only after we switched
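
In tcp_update_scoreboard(), fast_rexmit is no longer threaded through to tcp_mark_head_lost(); for the RFC3517 (non-FACK SACK) branch it instead becomes a floor on sacked_upto, so a fast retransmit still marks at least one segment lost — the guarantee the removed `(!fast_rexmit || (tp->lost_out > 0))` clause used to provide from inside the loop.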
@@ -2524,7 +2553,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    before(tp->snd_una, tp->high_seq) &&
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
+		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
 	}
 
@@ -2586,6 +2615,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	case TCP_CA_Loss:
 		if (flag & FLAG_DATA_ACKED)
 			icsk->icsk_retransmits = 0;
+		if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
+			tcp_reset_reno_sack(tp);
 		if (!tcp_try_undo_loss(sk)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
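
The TCP_CA_Loss hunk makes a NewReno connection drop its emulated SACK count whenever snd_una advances during loss recovery: each cumulative ACK retires segments that the dupack-derived sacked_out was counting, so resetting it keeps the estimate from going stale.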
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 76b3653e9b4c..90270cbdf42c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1808,6 +1808,9 @@ void tcp_simple_retransmit(struct sock *sk)
 	if (!lost)
 		return;
 
+	if (tcp_is_reno(tp))
+		tcp_limit_reno_sacked(tp);
+
 	tcp_verify_left_out(tp);
 
 	/* Don't muck with the congestion window here.
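
The tcp_output.c hunk is the second consumer of the new helper: tcp_simple_retransmit() grows lost_out when it marks MTU-shrunk segments lost, which on a NewReno connection can push sacked_out + lost_out past packets_out; re-clamping sacked_out via tcp_limit_reno_sacked() beforehand keeps the tcp_verify_left_out() sanity check from tripping.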