aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorIlpo Järvinen <ilpo.jarvinen@helsinki.fi>2008-12-06 01:42:41 -0500
committerDavid S. Miller <davem@davemloft.net>2008-12-06 01:42:41 -0500
commit50133161a83c9e5974d430cabd77d6430ca7d579 (patch)
tree288ad9673bb907399f1d8f646c1a1a1f0af64de8 /net/ipv4/tcp_input.c
parenta1197f5a6faa23e5d0c1f8ed97b011deb2a75457 (diff)
tcp: no need to pass prev skb around, reduces arg pressure
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c9
1 files changed, 4 insertions, 5 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e25827719e70..2d9151c94368 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1376,12 +1376,12 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
 	return sacked;
 }
 
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
-			   struct sk_buff *skb,
+static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 			   struct tcp_sacktag_state *state,
 			   unsigned int pcount, int shifted, int mss)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
 
 	BUG_ON(!pcount);
 
@@ -1565,7 +1565,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
 	if (!skb_shift(prev, skb, len))
 		goto fallback;
-	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss))
+	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss))
 		goto out;
 
 	/* Hole filled allows collapsing with the next as well, this is very
@@ -1584,8 +1584,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		len = skb->len;
 		if (skb_shift(prev, skb, len)) {
 			pcount += tcp_skb_pcount(skb);
-			tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), len,
-				       mss);
+			tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss);
 		}
 
 out: