Diffstat (limited to 'net/ipv4/ip_output.c')
-rw-r--r--   net/ipv4/ip_output.c | 49
1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3e7e910c7c0f..247026282669 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -95,7 +95,7 @@ int __ip_local_out(struct sk_buff *skb)
 
 	iph->tot_len = htons(skb->len);
 	ip_send_check(iph);
-	return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev,
+	return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
 }
 
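Note: skb_dst() and skb_dst_set(), which this patch switches every caller to, are accessor helpers around the skb's cached dst entry. The sketch below is an assumption about their shape (the field name _skb_dst in particular is illustrative), shown only so the conversion reads easily; the point of the accessors is that the underlying storage can change later without touching call sites like __ip_local_out().

/* Hedged sketch of the accessors (assumed, not quoted from skbuff.h). */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return skb->_skb_dst;	/* field name is an assumption */
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_dst = dst;
}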
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 	__skb_pull(newskb, skb_network_offset(newskb));
 	newskb->pkt_type = PACKET_LOOPBACK;
 	newskb->ip_summed = CHECKSUM_UNNECESSARY;
-	WARN_ON(!newskb->dst);
+	WARN_ON(!skb_dst(newskb));
 	netif_rx(newskb);
 	return 0;
 }
@@ -140,7 +140,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 			  __be32 saddr, __be32 daddr, struct ip_options *opt)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct rtable *rt = skb->rtable;
+	struct rtable *rt = skb_rtable(skb);
 	struct iphdr *iph;
 
 	/* Build the IP header. */
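Note: skb_rtable(), used throughout this patch in place of the old skb->rtable field, is presumably nothing more than a typed view of skb_dst(); a minimal sketch, assuming that shape:

/* Assumed definition: cast the skb's dst entry to the IPv4 routing entry. */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}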
@@ -176,15 +176,15 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 
 static inline int ip_finish_output2(struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb->dst;
+	struct dst_entry *dst = skb_dst(skb);
 	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 
-	if (rt->rt_type == RTN_MULTICAST)
-		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTMCASTPKTS);
-	else if (rt->rt_type == RTN_BROADCAST)
-		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTBCASTPKTS);
+	if (rt->rt_type == RTN_MULTICAST) {
+		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
+	} else if (rt->rt_type == RTN_BROADCAST)
+		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
 
 	/* Be paranoid, rather than too clever. */
 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
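Note: the IP_INC_STATS -> IP_UPD_PO_STATS change makes one call update a packet/octet counter pair instead of only the packet counter. The lines below show the intended effect only, not the macro's real body; the counter names are the usual PKTS/OCTETS pair behind the shortened mib name:

/* One call now accounts both packets and bytes for the group: */
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	/* roughly: IPSTATS_MIB_OUTMCASTPKTS   += 1;
	 *          IPSTATS_MIB_OUTMCASTOCTETS += skb->len;
	 */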
@@ -217,14 +217,14 @@ static inline int ip_skb_dst_mtu(struct sk_buff *skb)
 	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
 
 	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
-	       skb->dst->dev->mtu : dst_mtu(skb->dst);
+	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 }
 
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
-	if (skb->dst->xfrm != NULL) {
+	if (skb_dst(skb)->xfrm != NULL) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
 		return dst_output(skb);
 	}
@@ -238,13 +238,13 @@ static int ip_finish_output(struct sk_buff *skb)
 int ip_mc_output(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
-	struct rtable *rt = skb->rtable;
+	struct rtable *rt = skb_rtable(skb);
 	struct net_device *dev = rt->u.dst.dev;
 
 	/*
 	 *	If the indicated interface is up and running, send the packet.
 	 */
-	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);
+	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
 
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
@@ -296,9 +296,9 @@ int ip_mc_output(struct sk_buff *skb)
 
 int ip_output(struct sk_buff *skb)
 {
-	struct net_device *dev = skb->dst->dev;
+	struct net_device *dev = skb_dst(skb)->dev;
 
-	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_OUTREQUESTS);
+	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
 
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
@@ -319,7 +319,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
 	/* Skip all of this if the packet is already routed,
 	 * f.e. by something like SCTP.
 	 */
-	rt = skb->rtable;
+	rt = skb_rtable(skb);
 	if (rt != NULL)
 		goto packet_routed;
 
@@ -355,7 +355,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
 		}
 		sk_setup_caps(sk, &rt->u.dst);
 	}
-	skb->dst = dst_clone(&rt->u.dst);
+	skb_dst_set(skb, dst_clone(&rt->u.dst));
 
 packet_routed:
 	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -401,8 +401,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 	to->pkt_type = from->pkt_type;
 	to->priority = from->priority;
 	to->protocol = from->protocol;
-	dst_release(to->dst);
-	to->dst = dst_clone(from->dst);
+	skb_dst_drop(to);
+	skb_dst_set(to, dst_clone(skb_dst(from)));
 	to->dev = from->dev;
 	to->mark = from->mark;
 
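Note: ip_copy_metadata() now uses the drop/set pair instead of open-coding dst_release() plus an assignment. Assuming the obvious shape, skb_dst_drop() behaves roughly like the sketch below (dst_release() tolerates a NULL dst, so dropping on an skb without one stays safe):

/* Assumed behaviour of skb_dst_drop(): release and clear the skb's dst. */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
}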
@@ -440,7 +440,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	unsigned int mtu, hlen, left, len, ll_rs, pad;
 	int offset;
 	__be16 not_last_frag;
-	struct rtable *rt = skb->rtable;
+	struct rtable *rt = skb_rtable(skb);
 	int err = 0;
 
 	dev = rt->u.dst.dev;
@@ -474,7 +474,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	 * LATER: this step can be merged to real generation of fragments,
 	 * we can switch to copy when see the first bad fragment.
 	 */
-	if (skb_shinfo(skb)->frag_list) {
+	if (skb_has_frags(skb)) {
 		struct sk_buff *frag;
 		int first_len = skb_pagelen(skb);
 		int truesizes = 0;
@@ -485,7 +485,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		    skb_cloned(skb))
 			goto slow_path;
 
-		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
+		skb_walk_frags(skb, frag) {
 			/* Correct geometry. */
 			if (frag->len > mtu ||
 			    ((frag->len & 7) && frag->next) ||
@@ -498,7 +498,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
 			BUG_ON(frag->sk);
 			if (skb->sk) {
-				sock_hold(skb->sk);
 				frag->sk = skb->sk;
 				frag->destructor = sock_wfree;
 				truesizes += frag->truesize;
@@ -510,7 +509,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		err = 0;
 		offset = 0;
 		frag = skb_shinfo(skb)->frag_list;
-		skb_shinfo(skb)->frag_list = NULL;
+		skb_frag_list_init(skb);
 		skb->data_len = first_len - skb_headlen(skb);
 		skb->truesize -= truesizes;
 		skb->len = first_len;
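Note: the three frag_list changes in ip_fragment() (skb_has_frags(), skb_walk_frags(), skb_frag_list_init()) replace direct skb_shinfo(skb)->frag_list accesses with helpers. Presumably they are thin wrappers along these lines; a sketch under that assumption, not quoted from skbuff.h:

/* Assumed thin wrappers over skb_shinfo(skb)->frag_list: */
static inline int skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)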
@@ -1294,7 +1293,7 @@ int ip_push_pending_frames(struct sock *sk)
 	 * on dst refcount
 	 */
 	inet->cork.dst = NULL;
-	skb->dst = &rt->u.dst;
+	skb_dst_set(skb, &rt->u.dst);
 
 	if (iph->protocol == IPPROTO_ICMP)
 		icmp_out_count(net, ((struct icmphdr *)
@@ -1362,7 +1361,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 	} replyopts;
 	struct ipcm_cookie ipc;
 	__be32 daddr;
-	struct rtable *rt = skb->rtable;
+	struct rtable *rt = skb_rtable(skb);
 
 	if (ip_options_echo(&replyopts.opt, skb))
 		return;