about summary refs log tree commit diff stats
path: root/net/ipv4
diff options
context:
space:
mode:
author    David S. Miller <davem@davemloft.net>    2010-10-04 14:56:38 -0400
committer David S. Miller <davem@davemloft.net>    2010-10-04 14:56:38 -0400
commit  21a180cda012e1f93e362dd4a9b0bfd3d8c92940 (patch)
tree    0e0d10baa3fdcd8ffbc6881076ff1695808dad9d /net/ipv4
parent  c7d4426a98a5f6654cd0b4b33d9dab2e77192c18 (diff)
parent  51e97a12bef19b7e43199fc153cf9bd5f2140362 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/ipv4/Kconfig
	net/ipv4/tcp_timer.c
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig      4
-rw-r--r--  net/ipv4/igmp.c      14
-rw-r--r--  net/ipv4/route.c      2
-rw-r--r--  net/ipv4/tcp.c        2
-rw-r--r--  net/ipv4/tcp_input.c  3
-rw-r--r--  net/ipv4/tcp_timer.c 25
6 files changed, 33 insertions, 17 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 5462e2d147a6..e848e6c062cd 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -223,7 +223,7 @@ config NET_IPGRE_DEMUX
223 223
224config NET_IPGRE 224config NET_IPGRE
225 tristate "IP: GRE tunnels over IP" 225 tristate "IP: GRE tunnels over IP"
226 depends on NET_IPGRE_DEMUX 226 depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
227 help 227 help
228 Tunneling means encapsulating data of one protocol type within 228 Tunneling means encapsulating data of one protocol type within
229 another protocol and sending it over a channel that understands the 229 another protocol and sending it over a channel that understands the
@@ -419,7 +419,7 @@ config INET_XFRM_MODE_BEET
419 If unsure, say Y. 419 If unsure, say Y.
420 420
421config INET_LRO 421config INET_LRO
422 bool "Large Receive Offload (ipv4/tcp)" 422 tristate "Large Receive Offload (ipv4/tcp)"
423 default y 423 default y
424 ---help--- 424 ---help---
425 Support for Large Receive Offload (ipv4/tcp). 425 Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1fdcacd36ce7..2a4bb76f2132 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
834 int mark = 0; 834 int mark = 0;
835 835
836 836
837 if (len == 8 || IGMP_V2_SEEN(in_dev)) { 837 if (len == 8) {
838 if (ih->code == 0) { 838 if (ih->code == 0) {
839 /* Alas, old v1 router presents here. */ 839 /* Alas, old v1 router presents here. */
840 840
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
856 igmpv3_clear_delrec(in_dev); 856 igmpv3_clear_delrec(in_dev);
857 } else if (len < 12) { 857 } else if (len < 12) {
858 return; /* ignore bogus packet; freed by caller */ 858 return; /* ignore bogus packet; freed by caller */
859 } else if (IGMP_V1_SEEN(in_dev)) {
860 /* This is a v3 query with v1 queriers present */
861 max_delay = IGMP_Query_Response_Interval;
862 group = 0;
863 } else if (IGMP_V2_SEEN(in_dev)) {
864 /* this is a v3 query with v2 queriers present;
865 * Interpretation of the max_delay code is problematic here.
866 * A real v2 host would use ih_code directly, while v3 has a
867 * different encoding. We use the v3 encoding as more likely
868 * to be intended in a v3 query.
869 */
870 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
859 } else { /* v3 */ 871 } else { /* v3 */
860 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) 872 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
861 return; 873 return;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c3cb8bd23638..04e0df82b88c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1232,7 +1232,7 @@ restart:
1232 } 1232 }
1233 1233
1234 if (net_ratelimit()) 1234 if (net_ratelimit())
1235 printk(KERN_WARNING "Neighbour table overflow.\n"); 1235 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1236 rt_drop(rt); 1236 rt_drop(rt);
1237 return -ENOBUFS; 1237 return -ENOBUFS;
1238 } 1238 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19192c5fe67a..1664a0590bb8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -943,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
943 sg = sk->sk_route_caps & NETIF_F_SG; 943 sg = sk->sk_route_caps & NETIF_F_SG;
944 944
945 while (--iovlen >= 0) { 945 while (--iovlen >= 0) {
946 int seglen = iov->iov_len; 946 size_t seglen = iov->iov_len;
947 unsigned char __user *from = iov->iov_base; 947 unsigned char __user *from = iov->iov_base;
948 948
949 iov++; 949 iov++;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eaf20e7e61da..f6fdd727a23d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2532,7 +2532,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2532 cnt += tcp_skb_pcount(skb); 2532 cnt += tcp_skb_pcount(skb);
2533 2533
2534 if (cnt > packets) { 2534 if (cnt > packets) {
2535 if (tcp_is_sack(tp) || (oldcnt >= packets)) 2535 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
2536 (oldcnt >= packets))
2536 break; 2537 break;
2537 2538
2538 mss = skb_shinfo(skb)->gso_size; 2539 mss = skb_shinfo(skb)->gso_size;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index baea4a129022..f3c8c6c019ae 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
135 135
136/* This function calculates a "timeout" which is equivalent to the timeout of a 136/* This function calculates a "timeout" which is equivalent to the timeout of a
137 * TCP connection after "boundary" unsuccessful, exponentially backed-off 137 * TCP connection after "boundary" unsuccessful, exponentially backed-off
138 * retransmissions with an initial RTO of TCP_RTO_MIN. 138 * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
139 * syn_set flag is set.
139 */ 140 */
140static bool retransmits_timed_out(struct sock *sk, 141static bool retransmits_timed_out(struct sock *sk,
141 unsigned int boundary, 142 unsigned int boundary,
142 unsigned int timeout) 143 unsigned int timeout,
144 bool syn_set)
143{ 145{
144 unsigned int linear_backoff_thresh, start_ts; 146 unsigned int linear_backoff_thresh, start_ts;
147 unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
145 148
146 if (!inet_csk(sk)->icsk_retransmits) 149 if (!inet_csk(sk)->icsk_retransmits)
147 return false; 150 return false;
@@ -152,12 +155,12 @@ static bool retransmits_timed_out(struct sock *sk,
152 start_ts = tcp_sk(sk)->retrans_stamp; 155 start_ts = tcp_sk(sk)->retrans_stamp;
153 156
154 if (likely(timeout == 0)) { 157 if (likely(timeout == 0)) {
155 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); 158 linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
156 159
157 if (boundary <= linear_backoff_thresh) 160 if (boundary <= linear_backoff_thresh)
158 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; 161 timeout = ((2 << boundary) - 1) * rto_base;
159 else 162 else
160 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + 163 timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
161 (boundary - linear_backoff_thresh) * TCP_RTO_MAX; 164 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
162 } 165 }
163 return (tcp_time_stamp - start_ts) >= timeout; 166 return (tcp_time_stamp - start_ts) >= timeout;
@@ -168,14 +171,15 @@ static int tcp_write_timeout(struct sock *sk)
168{ 171{
169 struct inet_connection_sock *icsk = inet_csk(sk); 172 struct inet_connection_sock *icsk = inet_csk(sk);
170 int retry_until; 173 int retry_until;
171 bool do_reset; 174 bool do_reset, syn_set = 0;
172 175
173 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 176 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
174 if (icsk->icsk_retransmits) 177 if (icsk->icsk_retransmits)
175 dst_negative_advice(sk); 178 dst_negative_advice(sk);
176 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 179 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
180 syn_set = 1;
177 } else { 181 } else {
178 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) { 182 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
179 /* Black hole detection */ 183 /* Black hole detection */
180 tcp_mtu_probing(icsk, sk); 184 tcp_mtu_probing(icsk, sk);
181 185
@@ -188,7 +192,7 @@ static int tcp_write_timeout(struct sock *sk)
188 192
189 retry_until = tcp_orphan_retries(sk, alive); 193 retry_until = tcp_orphan_retries(sk, alive);
190 do_reset = alive || 194 do_reset = alive ||
191 !retransmits_timed_out(sk, retry_until, 0); 195 !retransmits_timed_out(sk, retry_until, 0, 0);
192 196
193 if (tcp_out_of_resources(sk, do_reset)) 197 if (tcp_out_of_resources(sk, do_reset))
194 return 1; 198 return 1;
@@ -196,8 +200,7 @@ static int tcp_write_timeout(struct sock *sk)
196 } 200 }
197 201
198 if (retransmits_timed_out(sk, retry_until, 202 if (retransmits_timed_out(sk, retry_until,
199 (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 : 203 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
200 icsk->icsk_user_timeout)) {
201 /* Has it gone just too far? */ 204 /* Has it gone just too far? */
202 tcp_write_err(sk); 205 tcp_write_err(sk);
203 return 1; 206 return 1;
@@ -439,7 +442,7 @@ out_reset_timer:
439 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 442 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
440 } 443 }
441 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 444 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
442 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0)) 445 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
443 __sk_dst_reset(sk); 446 __sk_dst_reset(sk);
444 447
445out:; 448out:;