aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/ip_output.c
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@redhat.com>2007-04-21 01:47:35 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-04-26 01:25:10 -0400
commiteddc9ec53be2ecdbf4efe0efd4a83052594f0ac0 (patch)
tree4a38ab4dbd9d61fdf5a5ea6ed61463e0b9e33ba7 /net/ipv4/ip_output.c
parente023dd643798c4f06c16466af90b4d250e4b8bd7 (diff)
[SK_BUFF]: Introduce ip_hdr(), remove skb->nh.iph
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ip_output.c')
-rw-r--r--net/ipv4/ip_output.c20
1 files changed, 10 insertions, 10 deletions
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 15de9d43950e..1abc48899f2d 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -127,7 +127,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
127 /* Build the IP header. */ 127 /* Build the IP header. */
128 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); 128 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
129 skb_reset_network_header(skb); 129 skb_reset_network_header(skb);
130 iph = skb->nh.iph; 130 iph = ip_hdr(skb);
131 iph->version = 4; 131 iph->version = 4;
132 iph->ihl = 5; 132 iph->ihl = 5;
133 iph->tos = inet->tos; 133 iph->tos = inet->tos;
@@ -245,7 +245,7 @@ int ip_mc_output(struct sk_buff *skb)
245 245
246 /* Multicasts with ttl 0 must not go beyond the host */ 246 /* Multicasts with ttl 0 must not go beyond the host */
247 247
248 if (skb->nh.iph->ttl == 0) { 248 if (ip_hdr(skb)->ttl == 0) {
249 kfree_skb(skb); 249 kfree_skb(skb);
250 return 0; 250 return 0;
251 } 251 }
@@ -332,7 +332,7 @@ packet_routed:
332 /* OK, we know where to send it, allocate and build IP header. */ 332 /* OK, we know where to send it, allocate and build IP header. */
333 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); 333 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
334 skb_reset_network_header(skb); 334 skb_reset_network_header(skb);
335 iph = skb->nh.iph; 335 iph = ip_hdr(skb);
336 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); 336 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
337 iph->tot_len = htons(skb->len); 337 iph->tot_len = htons(skb->len);
338 if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) 338 if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
@@ -428,7 +428,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
428 * Point into the IP datagram header. 428 * Point into the IP datagram header.
429 */ 429 */
430 430
431 iph = skb->nh.iph; 431 iph = ip_hdr(skb);
432 432
433 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) { 433 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
434 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); 434 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
@@ -504,7 +504,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
504 __skb_push(frag, hlen); 504 __skb_push(frag, hlen);
505 skb_reset_network_header(frag); 505 skb_reset_network_header(frag);
506 memcpy(skb_network_header(frag), iph, hlen); 506 memcpy(skb_network_header(frag), iph, hlen);
507 iph = frag->nh.iph; 507 iph = ip_hdr(frag);
508 iph->tot_len = htons(frag->len); 508 iph->tot_len = htons(frag->len);
509 ip_copy_metadata(frag, skb); 509 ip_copy_metadata(frag, skb);
510 if (offset == 0) 510 if (offset == 0)
@@ -619,7 +619,7 @@ slow_path:
619 /* 619 /*
620 * Fill in the new header fields. 620 * Fill in the new header fields.
621 */ 621 */
622 iph = skb2->nh.iph; 622 iph = ip_hdr(skb2);
623 iph->frag_off = htons((offset >> 3)); 623 iph->frag_off = htons((offset >> 3));
624 624
625 /* ANK: dirty, but effective trick. Upgrade options only if 625 /* ANK: dirty, but effective trick. Upgrade options only if
@@ -1125,7 +1125,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1125 */ 1125 */
1126 data = skb_put(skb, fragheaderlen + fraggap); 1126 data = skb_put(skb, fragheaderlen + fraggap);
1127 skb_reset_network_header(skb); 1127 skb_reset_network_header(skb);
1128 iph = skb->nh.iph; 1128 iph = ip_hdr(skb);
1129 data += fragheaderlen; 1129 data += fragheaderlen;
1130 skb->h.raw = data; 1130 skb->h.raw = data;
1131 1131
@@ -1352,7 +1352,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1352 struct flowi fl = { .nl_u = { .ip4_u = 1352 struct flowi fl = { .nl_u = { .ip4_u =
1353 { .daddr = daddr, 1353 { .daddr = daddr,
1354 .saddr = rt->rt_spec_dst, 1354 .saddr = rt->rt_spec_dst,
1355 .tos = RT_TOS(skb->nh.iph->tos) } }, 1355 .tos = RT_TOS(ip_hdr(skb)->tos) } },
1356 /* Not quite clean, but right. */ 1356 /* Not quite clean, but right. */
1357 .uli_u = { .ports = 1357 .uli_u = { .ports =
1358 { .sport = skb->h.th->dest, 1358 { .sport = skb->h.th->dest,
@@ -1370,9 +1370,9 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1370 with locally disabled BH and that sk cannot be already spinlocked. 1370 with locally disabled BH and that sk cannot be already spinlocked.
1371 */ 1371 */
1372 bh_lock_sock(sk); 1372 bh_lock_sock(sk);
1373 inet->tos = skb->nh.iph->tos; 1373 inet->tos = ip_hdr(skb)->tos;
1374 sk->sk_priority = skb->priority; 1374 sk->sk_priority = skb->priority;
1375 sk->sk_protocol = skb->nh.iph->protocol; 1375 sk->sk_protocol = ip_hdr(skb)->protocol;
1376 ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0, 1376 ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1377 &ipc, rt, MSG_DONTWAIT); 1377 &ipc, rt, MSG_DONTWAIT);
1378 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { 1378 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {