Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/ip_output.c      4
-rw-r--r--  net/ipv4/syncookies.c    36
-rw-r--r--  net/ipv4/tcp.c           22
-rw-r--r--  net/ipv4/tcp_input.c     28
-rw-r--r--  net/ipv4/tcp_ipv4.c      32
-rw-r--r--  net/ipv4/tcp_minisocks.c  9
-rw-r--r--  net/ipv4/tcp_output.c    13
7 files changed, 75 insertions(+), 69 deletions(-)
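
This is the net/ipv4 leg of the conversion from the open-coded skb->h.th
union member to the typed tcp_hdr() accessor (ip_hdr() is already used for
the network header). For reference, a minimal sketch of the two accessors
as they look in this era of the tree — condensed here, so the exact
definitions in include/linux/tcp.h and include/linux/ip.h may differ in
detail:

	/* sketch: the accessor this series converts callers to */
	static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
	{
		return (struct tcphdr *)skb_transport_header(skb);
	}

	/* sketch: the equivalent network-header accessor */
	static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
	{
		return (struct iphdr *)skb_network_header(skb);
	}

Both reduce to the same load as the old union access, so apart from the
transmit-path hunks in tcp_output.c (see the note at the end of the diff)
the conversion is mechanical.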
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6d92358fc513..602268661eb3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1352,8 +1352,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 					   .tos = RT_TOS(ip_hdr(skb)->tos) } },
 				    /* Not quite clean, but right. */
 				    .uli_u = { .ports =
-					       { .sport = skb->h.th->dest,
-						 .dport = skb->h.th->source } },
+					       { .sport = tcp_hdr(skb)->dest,
+						 .dport = tcp_hdr(skb)->source } },
 				    .proto = sk->sk_protocol };
 	security_skb_classify_flow(skb, &fl);
 	if (ip_route_output_key(&rt, &fl))
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 261607178491..2da1be0589a9 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -125,10 +125,11 @@ static __u16 const msstab[] = {
 __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	const struct iphdr *iph = ip_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	int mssind;
 	const __u16 mss = *mssp;
 
-
 	tp->last_synq_overflow = jiffies;
 
 	/* XXX sort msstab[] by probability?  Binary search? */
@@ -138,9 +139,8 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 
 	NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);
 
-	return secure_tcp_syn_cookie(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				     skb->h.th->source, skb->h.th->dest,
-				     ntohl(skb->h.th->seq),
+	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+				     th->source, th->dest, ntohl(th->seq),
 				     jiffies / (HZ * 60), mssind);
 }
 
@@ -157,14 +157,13 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
  */
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-	__u32 seq;
-	__u32 mssind;
-
-	seq = ntohl(skb->h.th->seq)-1;
-	mssind = check_tcp_syn_cookie(cookie,
-				      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				      skb->h.th->source, skb->h.th->dest,
-				      seq, jiffies / (HZ * 60), COUNTER_TRIES);
+	const struct iphdr *iph = ip_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
+	__u32 seq = ntohl(th->seq) - 1;
+	__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+					    th->source, th->dest, seq,
+					    jiffies / (HZ * 60),
+					    COUNTER_TRIES);
 
 	return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
 }
@@ -191,14 +190,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	struct inet_request_sock *ireq;
 	struct tcp_request_sock *treq;
 	struct tcp_sock *tp = tcp_sk(sk);
-	__u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
+	const struct tcphdr *th = tcp_hdr(skb);
+	__u32 cookie = ntohl(th->ack_seq) - 1;
 	struct sock *ret = sk;
 	struct request_sock *req;
 	int mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
 
-	if (!sysctl_tcp_syncookies || !skb->h.th->ack)
+	if (!sysctl_tcp_syncookies || !th->ack)
 		goto out;
 
 	if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||
@@ -220,10 +220,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	}
 	ireq = inet_rsk(req);
 	treq = tcp_rsk(req);
-	treq->rcv_isn = ntohl(skb->h.th->seq) - 1;
+	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	req->mss = mss;
-	ireq->rmt_port = skb->h.th->source;
+	ireq->rmt_port = th->source;
 	ireq->loc_addr = ip_hdr(skb)->daddr;
 	ireq->rmt_addr = ip_hdr(skb)->saddr;
 	ireq->opt = NULL;
@@ -261,8 +261,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 					      .tos = RT_CONN_FLAGS(sk) } },
 			    .proto = IPPROTO_TCP,
 			    .uli_u = { .ports =
-				       { .sport = skb->h.th->dest,
-					 .dport = skb->h.th->source } } };
+				       { .sport = th->dest,
+					 .dport = th->source } } };
 	security_req_classify_flow(req, &fl);
 	if (ip_route_output_key(&rt, &fl)) {
 		reqsk_free(req);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 689f9330f1b9..f832f3c33ab1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -425,7 +425,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		/* Subtract 1, if FIN is in queue. */
 		if (answ && !skb_queue_empty(&sk->sk_receive_queue))
 			answ -=
-		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
+		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
 	} else
 		answ = tp->urg_seq - tp->copied_seq;
 	release_sock(sk);
@@ -1016,9 +1016,9 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 
 	skb_queue_walk(&sk->sk_receive_queue, skb) {
 		offset = seq - TCP_SKB_CB(skb)->seq;
-		if (skb->h.th->syn)
+		if (tcp_hdr(skb)->syn)
 			offset--;
-		if (offset < skb->len || skb->h.th->fin) {
+		if (offset < skb->len || tcp_hdr(skb)->fin) {
 			*off = offset;
 			return skb;
 		}
@@ -1070,7 +1070,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 				if (offset != skb->len)
 					break;
 			}
-			if (skb->h.th->fin) {
+			if (tcp_hdr(skb)->fin) {
 				sk_eat_skb(sk, skb, 0);
 				++seq;
 				break;
@@ -1174,11 +1174,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				break;
 			}
 			offset = *seq - TCP_SKB_CB(skb)->seq;
-			if (skb->h.th->syn)
+			if (tcp_hdr(skb)->syn)
 				offset--;
 			if (offset < skb->len)
 				goto found_ok_skb;
-			if (skb->h.th->fin)
+			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
 			BUG_TRAP(flags & MSG_PEEK);
 			skb = skb->next;
@@ -1394,7 +1394,7 @@ skip_copy:
 		if (used + offset < skb->len)
 			continue;
 
-		if (skb->h.th->fin)
+		if (tcp_hdr(skb)->fin)
 			goto found_fin_ok;
 		if (!(flags & MSG_PEEK)) {
 			sk_eat_skb(sk, skb, copied_early);
@@ -1563,7 +1563,7 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-			  skb->h.th->fin;
+			  tcp_hdr(skb)->fin;
 		data_was_unread += len;
 		__kfree_skb(skb);
 	}
@@ -2170,7 +2170,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	if (!pskb_may_pull(skb, sizeof(*th)))
 		goto out;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	thlen = th->doff * 4;
 	if (thlen < sizeof(*th))
 		goto out;
@@ -2210,7 +2210,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	delta = htonl(oldlen + (thlen + len));
 
 	skb = segs;
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	seq = ntohl(th->seq);
 
 	do {
@@ -2224,7 +2224,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 
 		seq += len;
 		skb = skb->next;
-		th = skb->h.th;
+		th = tcp_hdr(skb);
 
 		th->seq = htonl(seq);
 		th->cwr = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2776a8b01339..c1ce36237380 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -148,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
 	     * to handle super-low mtu links fairly.
 	     */
 	    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-	     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+	     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
 		/* Subtract also invariant (if peer is RFC compliant),
 		 * tcp header plus fixed timestamp option length.
 		 * Resulting "len" is MSS free of SACK jitter.
@@ -2559,9 +2559,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 				 struct sk_buff *skb, u32 ack, u32 ack_seq)
 {
 	int flag = 0;
-	u32 nwin = ntohs(skb->h.th->window);
+	u32 nwin = ntohs(tcp_hdr(skb)->window);
 
-	if (likely(!skb->h.th->syn))
+	if (likely(!tcp_hdr(skb)->syn))
 		nwin <<= tp->rx_opt.snd_wscale;
 
 	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
@@ -2766,7 +2766,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	if (TCP_SKB_CB(skb)->sacked)
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
-	if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+	if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
 		flag |= FLAG_ECE;
 
 	tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
@@ -2833,7 +2833,7 @@ uninteresting_ack:
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
 	unsigned char *ptr;
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	int length=(th->doff*4)-sizeof(struct tcphdr);
 
 	ptr = (unsigned char *)(th + 1);
@@ -2995,7 +2995,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -3357,8 +3357,8 @@ static void tcp_ofo_queue(struct sock *sk)
 		__skb_unlink(skb, &tp->out_of_order_queue);
 		__skb_queue_tail(&sk->sk_receive_queue, skb);
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-		if (skb->h.th->fin)
-			tcp_fin(skb, sk, skb->h.th);
+		if (tcp_hdr(skb)->fin)
+			tcp_fin(skb, sk, tcp_hdr(skb));
 	}
 }
 
@@ -3366,7 +3366,7 @@ static int tcp_prune_queue(struct sock *sk);
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int eaten = -1;
 
@@ -3605,7 +3605,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 		 *   - bloated or contains data before "start" or
 		 *     overlaps to the next one.
 		 */
-		if (!skb->h.th->syn && !skb->h.th->fin &&
+		if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
 		    (tcp_win_from_space(skb->truesize) > skb->len ||
 		     before(TCP_SKB_CB(skb)->seq, start) ||
 		     (skb->next != tail &&
@@ -3616,7 +3616,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			start = TCP_SKB_CB(skb)->end_seq;
 			skb = skb->next;
 		}
-		if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+		if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
 			return;
 
 		while (before(start, end)) {
@@ -3665,7 +3665,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			__kfree_skb(skb);
 			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
-			if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+			if (skb == tail ||
+			    tcp_hdr(skb)->syn ||
+			    tcp_hdr(skb)->fin)
 				return;
 		}
 	}
@@ -4072,7 +4074,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
 	tcp_rcv_space_adjust(sk);
 
 	if ((tp->ucopy.len == 0) ||
-	    (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+	    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
 	    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
 		tp->ucopy.wakeup = 1;
 		sk->sk_data_ready(sk, 0);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c146a02f8495..617a5e4ca010 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -127,8 +127,8 @@ static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 					  ip_hdr(skb)->saddr,
-					  skb->h.th->dest,
-					  skb->h.th->source);
+					  tcp_hdr(skb)->dest,
+					  tcp_hdr(skb)->source);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -499,7 +499,7 @@ out:
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		th->check = ~tcp_v4_check(len, inet->saddr,
@@ -522,7 +522,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 		return -EINVAL;
 
 	iph = ip_hdr(skb);
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	th->check = 0;
 	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
@@ -546,7 +546,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -622,7 +622,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	struct {
 		struct tcphdr th;
 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -745,7 +745,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 	skb = tcp_make_synack(sk, dst, req);
 
 	if (skb) {
-		struct tcphdr *th = skb->h.th;
+		struct tcphdr *th = tcp_hdr(skb);
 
 		th->check = tcp_v4_check(skb->len,
 					 ireq->loc_addr,
@@ -781,7 +781,7 @@ static void syn_flood_warning(struct sk_buff *skb)
 		warntime = jiffies;
 		printk(KERN_INFO
 		       "possible SYN flooding on port %d. Sending cookies.\n",
-		       ntohs(skb->h.th->dest));
+		       ntohs(tcp_hdr(skb)->dest));
 	}
 }
 #endif
@@ -1134,7 +1134,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	__u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	int length = (th->doff << 2) - sizeof(struct tcphdr);
 	int genhash;
 	unsigned char *ptr;
@@ -1327,7 +1327,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq->rmt_addr = saddr;
 	ireq->opt = tcp_v4_save_options(sk, skb);
 	if (!want_cookie)
-		TCP_ECN_create_request(req, skb->h.th);
+		TCP_ECN_create_request(req, tcp_hdr(skb));
 
 	if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES
@@ -1375,7 +1375,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
 				       "request from %u.%u.%u.%u/%u\n",
 				       NIPQUAD(saddr),
-				       ntohs(skb->h.th->source));
+				       ntohs(tcp_hdr(skb)->source));
 			dst_release(dst);
 			goto drop_and_free;
 		}
@@ -1481,7 +1481,7 @@ exit:
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = skb->h.th;
+	struct tcphdr *th = tcp_hdr(skb);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct sock *nsk;
 	struct request_sock **prev;
@@ -1556,7 +1556,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		TCP_CHECK_TIMER(sk);
-		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
 		}
@@ -1582,7 +1582,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
 		goto reset;
 	}
@@ -1625,7 +1625,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 
 	if (th->doff < sizeof(struct tcphdr) / 4)
 		goto bad_packet;
@@ -1640,7 +1640,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		     tcp_v4_checksum_init(skb)))
 		goto bad_packet;
 
-	th = skb->h.th;
+	th = tcp_hdr(skb);
 	iph = ip_hdr(skb);
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 463d2b24d2db..a12b08fca5ad 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -453,7 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
 			newtp->window_clamp = min(newtp->window_clamp, 65535U);
 		}
-		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
+		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
+				  newtp->rx_opt.snd_wscale);
 		newtp->max_window = newtp->snd_wnd;
 
 		if (newtp->rx_opt.tstamp_ok) {
@@ -488,7 +489,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 			   struct request_sock *req,
 			   struct request_sock **prev)
 {
-	struct tcphdr *th = skb->h.th;
+	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	int paws_reject = 0;
 	struct tcp_options_received tmp_opt;
@@ -710,8 +711,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 	int state = child->sk_state;
 
 	if (!sock_owned_by_user(child)) {
-		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
-
+		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
+					    skb->len);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
 			parent->sk_data_ready(parent, 0);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f19f5fb361b5..29c53fbb2204 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -465,11 +465,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
-	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
-	skb->h.th = th;
+	skb_push(skb, tcp_header_size);
+	skb_reset_transport_header(skb);
 	skb_set_owner_w(skb, sk);
 
 	/* Build TCP header and checksum it. */
+	th = tcp_hdr(skb);
 	th->source = inet->sport;
 	th->dest = inet->dport;
 	th->seq = htonl(tcb->seq);
@@ -524,7 +525,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		tp->af_specific->calc_md5_hash(md5_hash_location,
 					       md5,
 					       sk, NULL, NULL,
-					       skb->h.th,
+					       tcp_hdr(skb),
 					       sk->sk_protocol,
 					       skb->len);
 	}
@@ -2128,8 +2129,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	if (md5)
 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+	skb_push(skb, tcp_header_size);
+	skb_reset_transport_header(skb);
 
+	th = tcp_hdr(skb);
 	memset(th, 0, sizeof(struct tcphdr));
 	th->syn = 1;
 	th->ack = 1;
@@ -2183,7 +2186,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 		tp->af_specific->calc_md5_hash(md5_hash_location,
 					       md5,
 					       NULL, dst, req,
-					       skb->h.th, sk->sk_protocol,
+					       tcp_hdr(skb), sk->sk_protocol,
 					       skb->len);
 	}
 #endif
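
The tcp_output.c hunks above are the only ones that change the shape of the
code rather than just the spelling: instead of caching the return value of
skb_push() in skb->h.th, the header builders now mark the transport header
and re-derive the pointer through the accessor. A simplified sketch of the
new pattern, condensed from tcp_transmit_skb() above with the remaining
header fields elided:

	skb_push(skb, tcp_header_size);		/* skb->data now points at the TCP header */
	skb_reset_transport_header(skb);	/* record skb->data as the transport header */

	th = tcp_hdr(skb);			/* read the marker back through the accessor */
	th->source = inet->sport;		/* ...then fill in the header as before */

The ordering matters: tcp_hdr() reads the transport-header marker rather
than skb->data, so it is only valid once skb_reset_transport_header() has
run after the push.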