author		Arnaldo Carvalho de Melo <acme@redhat.com>	2007-04-11 00:04:22 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:25:26 -0400
commit		aa8223c7bb0b05183e1737881ed21827aa5b9e73 (patch)
tree		05c9832326edfeb878472f15cf8133ed9f014cdf /net/ipv4/tcp_ipv4.c
parent		ab6a5bb6b28a970104a34f0f6959b73cf61bdc72 (diff)
[SK_BUFF]: Introduce tcp_hdr(), remove skb->h.th
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
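For reference, the tcp_hdr() helper that this patch converts callers to lives in include/linux/tcp.h and is not part of this diffstat. A minimal sketch of what such an accessor looks like, assuming it is nothing more than a cast of the skb's transport header pointer (the raw pointer that skb->h.th used to alias in this series):

/* Hedged sketch, not the verbatim kernel definition: assumes the helper
 * simply casts the transport header pointer that skb->h.th aliased. */
static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb->h.raw;
}

With an accessor like that in place, every open-coded skb->h.th dereference in tcp_ipv4.c becomes a tcp_hdr(skb) call, which is exactly the mechanical substitution the hunks below perform.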
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c146a02f849..617a5e4ca01 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -127,8 +127,8 @@ static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 					  ip_hdr(skb)->saddr,
-					  skb->h.th->dest,
-					  skb->h.th->source);
+					  tcp_hdr(skb)->dest,
+					  tcp_hdr(skb)->source);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -499,7 +499,7 @@ out:
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		th->check = ~tcp_v4_check(len, inet->saddr,
@@ -522,7 +522,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 		return -EINVAL;
 
 	iph = ip_hdr(skb);
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	th->check = 0;
 	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
@@ -546,7 +546,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -622,7 +622,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -745,7 +745,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 	skb = tcp_make_synack(sk, dst, req);
 
 	if (skb) {
-		struct tcphdr *th = skb->h.th;
+		struct tcphdr *th = tcp_hdr(skb);
 
 		th->check = tcp_v4_check(skb->len,
 					 ireq->loc_addr,
@@ -781,7 +781,7 @@ static void syn_flood_warning(struct sk_buff *skb)
 		warntime = jiffies;
 		printk(KERN_INFO
 		       "possible SYN flooding on port %d. Sending cookies.\n",
-		       ntohs(skb->h.th->dest));
+		       ntohs(tcp_hdr(skb)->dest));
 	}
 }
 #endif
@@ -1134,7 +1134,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	__u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	int length = (th->doff << 2) - sizeof(struct tcphdr);
 	int genhash;
 	unsigned char *ptr;
@@ -1327,7 +1327,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq->rmt_addr = saddr;
 	ireq->opt = tcp_v4_save_options(sk, skb);
 	if (!want_cookie)
-		TCP_ECN_create_request(req, skb->h.th);
+		TCP_ECN_create_request(req, tcp_hdr(skb));
 
 	if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES
@@ -1375,7 +1375,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
 				       "request from %u.%u.%u.%u/%u\n",
 				       NIPQUAD(saddr),
-				       ntohs(skb->h.th->source));
+				       ntohs(tcp_hdr(skb)->source));
 			dst_release(dst);
 			goto drop_and_free;
 		}
@@ -1481,7 +1481,7 @@ exit:
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct sock *nsk;
 	struct request_sock **prev;
@@ -1556,7 +1556,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		TCP_CHECK_TIMER(sk);
-		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
 		}
@@ -1582,7 +1582,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
 		goto reset;
 	}
@@ -1625,7 +1625,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	if (th->doff < sizeof(struct tcphdr) / 4)
 		goto bad_packet;
@@ -1640,7 +1640,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		     tcp_v4_checksum_init(skb)))
 		goto bad_packet;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	iph = ip_hdr(skb);
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +