Diffstat (limited to 'net/ipv4')
 net/ipv4/arp.c       |  8
 net/ipv4/ip_gre.c    |  4
 net/ipv4/ipip.c      |  2
 net/ipv4/ipmr.c      |  2
 net/ipv4/tcp_input.c |  5
 net/ipv4/tcp_ipv4.c  | 59
 net/ipv4/tcp_timer.c | 13
 7 files changed, 66 insertions(+), 27 deletions(-)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 090e9991ac2a..4e80f336c0cf 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -130,7 +130,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
 static void parp_redo(struct sk_buff *skb);
 
-static struct neigh_ops arp_generic_ops = {
+static const struct neigh_ops arp_generic_ops = {
 	.family =		AF_INET,
 	.solicit =		arp_solicit,
 	.error_report =		arp_error_report,
@@ -140,7 +140,7 @@ static struct neigh_ops arp_generic_ops = {
 	.queue_xmit =		dev_queue_xmit,
 };
 
-static struct neigh_ops arp_hh_ops = {
+static const struct neigh_ops arp_hh_ops = {
 	.family =		AF_INET,
 	.solicit =		arp_solicit,
 	.error_report =		arp_error_report,
@@ -150,7 +150,7 @@ static struct neigh_ops arp_hh_ops = {
 	.queue_xmit =		dev_queue_xmit,
 };
 
-static struct neigh_ops arp_direct_ops = {
+static const struct neigh_ops arp_direct_ops = {
 	.family =		AF_INET,
 	.output =		dev_queue_xmit,
 	.connected_output =	dev_queue_xmit,
@@ -158,7 +158,7 @@ static struct neigh_ops arp_direct_ops = {
 	.queue_xmit =		dev_queue_xmit,
 };
 
-struct neigh_ops arp_broken_ops = {
+const struct neigh_ops arp_broken_ops = {
 	.family =		AF_INET,
 	.solicit =		arp_solicit,
 	.error_report =		arp_error_report,
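
Constifying these ops tables lets the compiler place them in read-only data. Since arp_broken_ops is non-static and used outside this file, its external declaration presumably needs the same qualifier; a sketch of the matching header change, assuming the declaration lives in include/net/arp.h:

-extern struct neigh_ops arp_broken_ops;
+extern const struct neigh_ops arp_broken_ops;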
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b902ef55be7f..533afaadefd4 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -662,7 +662,7 @@ drop_nolock:
 	return(0);
 }
 
-static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct net_device_stats *stats = &tunnel->dev->stats;
@@ -951,7 +951,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 			addend += 4;
 	}
 	dev->needed_headroom = addend + hlen;
-	mtu -= dev->hard_header_len - addend;
+	mtu -= dev->hard_header_len + addend;
 
 	if (mtu < 68)
 		mtu = 68;
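
The second ip_gre.c hunk is a sign fix: subtracting (hard_header_len - addend) credits the tunnel overhead back to the MTU instead of charging it. With illustrative numbers that are assumptions, not values from the patch (Ethernet underlay, hard_header_len = 14; addend = 24 for the outer IP plus GRE headers):

	mtu -= 14 - 24;		/* old: mtu grows by 10 bytes */
	mtu -= 14 + 24;		/* new: mtu shrinks by the real 38-byte overhead */

An inflated tunnel MTU produces packets that no longer fit the underlying link once encapsulated.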
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 98075b6d619c..62548cb0923c 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -387,7 +387,7 @@ static int ipip_rcv(struct sk_buff *skb)
  *	and that skb is filled properly by that function.
  */
 
-static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct net_device_stats *stats = &tunnel->dev->stats;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 06c33fb6b321..65d421cf5bc7 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -201,7 +201,7 @@ failure:
 
 #ifdef CONFIG_IP_PIMSM
 
-static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
 
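
The three xmit-handler signature changes above (ipgre_tunnel_xmit, ipip_tunnel_xmit, reg_vif_xmit) are part of the tree-wide conversion of ndo_start_xmit implementations from int to netdev_tx_t, which lets the compiler flag handlers that return something outside the transmit contract. A minimal sketch of the shape such a handler takes (example_xmit and its queue-full test are hypothetical):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_queue_full(dev))	/* hypothetical congestion test */
		return NETDEV_TX_BUSY;	/* core requeues and retries later */

	/* ... encapsulate and hand the skb to the lower device ... */
	return NETDEV_TX_OK;		/* skb consumed */
}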
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bdb0da237e6..af6d6fa00db1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -685,7 +685,7 @@ static inline void tcp_set_rto(struct sock *sk)
 	 * is invisible. Actually, Linux-2.4 also generates erratic
 	 * ACKs in some circumstances.
 	 */
-	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
+	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 
 	/* 2. Fixups made earlier cannot be right.
 	 *    If we do not estimate RTO correctly without them,
@@ -696,8 +696,7 @@ static inline void tcp_set_rto(struct sock *sk)
 	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 	 * guarantees that rto is higher.
 	 */
-	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
-		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+	tcp_bound_rto(sk);
 }
 
 /* Save metrics learned by this TCP session.
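
This refactor replaces the open-coded RTO computation and clamp with helpers precisely so the tcp_v4_err() change below can reuse them. Judging from the lines removed here, the helpers (presumably hoisted into include/net/tcp.h so tcp_ipv4.c can see them) amount to:

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}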
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6d88219c5e22..ce7d3b021ffc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -328,26 +328,29 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
  *
  */
 
-void tcp_v4_err(struct sk_buff *skb, u32 info)
+void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 {
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
+	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
+	struct inet_connection_sock *icsk;
 	struct tcp_sock *tp;
 	struct inet_sock *inet;
-	const int type = icmp_hdr(skb)->type;
-	const int code = icmp_hdr(skb)->code;
+	const int type = icmp_hdr(icmp_skb)->type;
+	const int code = icmp_hdr(icmp_skb)->code;
 	struct sock *sk;
+	struct sk_buff *skb;
 	__u32 seq;
+	__u32 remaining;
 	int err;
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net(icmp_skb->dev);
 
-	if (skb->len < (iph->ihl << 2) + 8) {
+	if (icmp_skb->len < (iph->ihl << 2) + 8) {
 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
 	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
-			iph->saddr, th->source, inet_iif(skb));
+			iph->saddr, th->source, inet_iif(icmp_skb));
 	if (!sk) {
 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
@@ -367,6 +370,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 
+	icsk = inet_csk(sk);
 	tp = tcp_sk(sk);
 	seq = ntohl(th->seq);
 	if (sk->sk_state != TCP_LISTEN &&
@@ -393,6 +397,39 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		}
 
 		err = icmp_err_convert[code].errno;
+		/* check if icmp_skb allows revert of backoff
+		 * (see draft-zimmermann-tcp-lcd) */
+		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
+			break;
+		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
+		    !icsk->icsk_backoff)
+			break;
+
+		icsk->icsk_backoff--;
+		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
+					 icsk->icsk_backoff;
+		tcp_bound_rto(sk);
+
+		skb = tcp_write_queue_head(sk);
+		BUG_ON(!skb);
+
+		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
+				tcp_time_stamp - TCP_SKB_CB(skb)->when);
+
+		if (remaining) {
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+						  remaining, TCP_RTO_MAX);
+		} else if (sock_owned_by_user(sk)) {
+			/* RTO revert clocked out retransmission,
+			 * but socket is locked. Will defer. */
+			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+						  HZ/20, TCP_RTO_MAX);
+		} else {
+			/* RTO revert clocked out retransmission.
+			 * Will retransmit now */
+			tcp_retransmit_timer(sk);
+		}
+
 		break;
 	case ICMP_TIME_EXCEEDED:
 		err = EHOSTUNREACH;
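
The new branch implements RTO backoff reversion per draft-zimmermann-tcp-lcd: an ICMP net/host unreachable referencing the first unacknowledged segment suggests the retransmission timeouts were caused by a now-reported connectivity problem rather than congestion, so one level of exponential backoff is undone and the retransmit timer is re-armed with whatever remains of the shorter timeout. A worked example with assumed numbers (base RTO 500 ms, backoff level 3, 2300 ms since the head segment was stamped):

	rto = 500 << 3 = 4000 ms		/* before the ICMP arrives */
	rto = 500 << 2 = 2000 ms		/* after icsk_backoff-- */
	remaining = 2000 - min(2000, 2300) = 0

A zero remainder means the reverted timer would already have fired, so the head segment is retransmitted immediately, or after a short HZ/20 grace period when the socket is locked by a user-space caller.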
@@ -1158,7 +1195,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 };
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.md5_lookup	= tcp_v4_reqsk_md5_lookup,
 	.calc_md5_hash	= tcp_v4_md5_hash_skb,
 };
@@ -1717,7 +1754,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 	return 0;
 }
 
-struct inet_connection_sock_af_ops ipv4_specific = {
+const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit	= ip_queue_xmit,
 	.send_check	= tcp_v4_send_check,
 	.rebuild_header	= inet_sk_rebuild_header,
@@ -1737,7 +1774,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
 };
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 	.md5_lookup	= tcp_v4_md5_lookup,
 	.calc_md5_hash	= tcp_v4_md5_hash_skb,
 	.md5_add	= tcp_v4_md5_add_func,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c520fb6e06d9..cdb2ca7684d4 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -137,13 +137,14 @@ static int tcp_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int retry_until;
+	bool do_reset;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
 			dst_negative_advice(&sk->sk_dst_cache);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 	} else {
-		if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
@@ -155,13 +156,15 @@ static int tcp_write_timeout(struct sock *sk)
 			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
 
 			retry_until = tcp_orphan_retries(sk, alive);
+			do_reset = alive ||
+				   !retransmits_timed_out(sk, retry_until);
 
-			if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
+			if (tcp_out_of_resources(sk, do_reset))
 				return 1;
 		}
 	}
 
-	if (icsk->icsk_retransmits >= retry_until) {
+	if (retransmits_timed_out(sk, retry_until)) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
 		return 1;
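
tcp_write_timeout() now bounds retransmissions by elapsed time instead of the raw icsk_retransmits counter; once tcp_v4_err() can revert backoff, that counter no longer maps cleanly onto elapsed time. The new retransmits_timed_out(sk, boundary) helper asks whether the socket has been retransmitting for longer than "boundary" exponentially backed-off retransmissions would take. A hedged sketch of that computation, ignoring the linear phase the mainline helper adds once the backoff saturates at TCP_RTO_MAX:

static bool retransmits_timed_out(struct sock *sk, unsigned int boundary)
{
	/* Time consumed by an exponential series of retransmissions
	 * starting from TCP_RTO_MIN: ((2 << boundary) - 1) * TCP_RTO_MIN. */
	unsigned int timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}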
@@ -279,7 +282,7 @@ static void tcp_probe_timer(struct sock *sk)
  *	The TCP retransmit timer.
  */
 
-static void tcp_retransmit_timer(struct sock *sk)
+void tcp_retransmit_timer(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
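
Dropping static here is what allows the tcp_v4_err() hunk above to invoke the retransmit logic directly when a reverted timer has already expired. Making it non-static implies a prototype visible to tcp_ipv4.c, presumably in include/net/tcp.h along the lines of:

extern void tcp_retransmit_timer(struct sock *sk);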
@@ -385,7 +388,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 out_reset_timer:
 	icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (icsk->icsk_retransmits > sysctl_tcp_retries1)
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
 		__sk_dst_reset(sk);
 
 out:;