author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-10-11 20:34:25 -0400
committer	David S. Miller <davem@davemloft.net>	2007-10-11 20:34:25 -0400
commit	d193594299064d386a2705928cd61ab2ca3d7cee (patch)
tree	c84b5bde72a9f64e5504a9d05e88c8a5b628854e
parent	f6fb128d272eb7b0f1a8be78153a724545e28be8 (diff)
[TCP]: Extract tcp_match_skb_to_sack from sacktag code
This is necessary for the upcoming DSACK bugfix. It also shortens the sacktag
code, which is not a sad thing at all... :-) Note that the caller now has to
handle the out-of-memory case.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/ipv4/tcp_input.c	54
1 file changed, 35 insertions(+), 19 deletions(-)
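The helper's sequence arithmetic can be tried outside the kernel. The sketch
below mirrors the in_sack / pkt_len computation added by this patch, with
user-space stand-ins (seq_before/seq_after) for the kernel's wrap-safe
before()/after() comparisons and plain variables (skb_seq, skb_end_seq,
pcount) in place of TCP_SKB_CB(skb)->seq, ->end_seq and tcp_skb_pcount();
it is an illustration under those assumptions, not part of the patch.

/* Sketch only: models the in_sack / pkt_len logic from tcp_match_skb_to_sack.
 * seq_before()/seq_after() mimic the kernel's wrap-safe u32 sequence compares;
 * skb_seq/skb_end_seq stand in for TCP_SKB_CB(skb)->seq and ->end_seq. */
#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

int main(void)
{
	uint32_t skb_seq = 1000, skb_end_seq = 4000; /* skb covers [1000, 4000) */
	uint32_t start_seq = 1000, end_seq = 3000;   /* SACK block [1000, 3000) */
	int pcount = 3;                              /* GSO skb with 3 segments */

	/* Full match: the SACK block must cover the whole skb. */
	int in_sack = !seq_after(start_seq, skb_seq) &&
		      !seq_before(end_seq, skb_end_seq);

	if (pcount > 1 && !in_sack && seq_after(skb_end_seq, start_seq)) {
		uint32_t pkt_len;

		/* Partial overlap: the covered head (or uncovered head) is
		 * split off at pkt_len, as the patch does via tcp_fragment(). */
		in_sack = !seq_after(start_seq, skb_seq);
		pkt_len = in_sack ? end_seq - skb_seq : start_seq - skb_seq;
		printf("partial overlap, would fragment at %u bytes\n", pkt_len);
	} else {
		printf("in_sack = %d\n", in_sack);
	}
	return 0;
}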
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 50047045df6..bd18c252dee 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1181,6 +1181,38 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 	return dup_sack;
 }
 
+/* Check if skb is fully within the SACK block. In presence of GSO skbs,
+ * the incoming SACK may not exactly match but we can find smaller MSS
+ * aligned portion of it that matches. Therefore we might need to fragment
+ * which may fail and creates some hassle (caller must handle error case
+ * returns).
+ */
+int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+			  u32 start_seq, u32 end_seq)
+{
+	int in_sack, err;
+	unsigned int pkt_len;
+
+	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
+		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
+
+	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
+	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
+
+		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
+
+		if (!in_sack)
+			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
+		else
+			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
+		err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size);
+		if (err < 0)
+			return err;
+	}
+
+	return in_sack;
+}
+
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
@@ -1333,25 +1365,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 				break;
 
-			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
-				!before(end_seq, TCP_SKB_CB(skb)->end_seq);
-
-			if (tcp_skb_pcount(skb) > 1 && !in_sack &&
-			    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
-				unsigned int pkt_len;
-
-				in_sack = !after(start_seq,
-						 TCP_SKB_CB(skb)->seq);
-
-				if (!in_sack)
-					pkt_len = (start_seq -
-						   TCP_SKB_CB(skb)->seq);
-				else
-					pkt_len = (end_seq -
-						   TCP_SKB_CB(skb)->seq);
-				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
-					break;
-			}
+			in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+			if (in_sack < 0)
+				break;
 
 			fack_count += tcp_skb_pcount(skb);
 