Diffstat (limited to 'net/ipv4/ip_output.c')
-rw-r--r--	net/ipv4/ip_output.c	42
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a0f2008584bc..bb0bb8f07c54 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -22,7 +22,7 @@
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
@@ -33,9 +33,9 @@
 *		some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Andi Kleen		:	Replace ip_reply with ip_send_reply.
 *	Andi Kleen		:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readibility.
 *	Marc Boucher		:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *	Detlev Wengorz		:	Copy protocol for fragments.
@@ -114,7 +114,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
@@ -243,7 +243,7 @@ int ip_mc_output(struct sk_buff *skb)
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev,
				ip_dev_loopback_xmit);
	}

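The hunk above is the multicast loopback path: the outgoing skb is cloned and the clone is pushed back through NF_IP_POST_ROUTING with ip_dev_loopback_xmit as the continuation, so local group members see the frame too. As a rough sketch of what that continuation does (illustrative, not the exact body in this tree), it marks the clone as locally received and reinjects it into the receive path:

	/* Sketch: present the clone to the stack as if it had just
	 * arrived on the wire. */
	static int loopback_xmit_sketch(struct sk_buff *newskb)
	{
		newskb->pkt_type = PACKET_LOOPBACK;	  /* looped-back copy */
		newskb->ip_summed = CHECKSUM_UNNECESSARY; /* we built it ourselves */
		netif_rx(newskb);			  /* back into the RX path */
		return 0;
	}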
@@ -277,7 +277,7 @@ int ip_output(struct sk_buff *skb)
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

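NF_HOOK_COND runs the hook chain only when its last argument is true; packets flagged IPSKB_REROUTED have already traversed POST_ROUTING once before being rerouted, so they go straight to ip_finish_output. Ignoring the netfilter-disabled build, the call above behaves like:

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			       ip_finish_output);
	return ip_finish_output(skb);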
@@ -660,7 +660,7 @@ slow_path:
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
@@ -755,7 +755,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
 *	from many pieces of data. Each pieces will be holded on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
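The comment block above describes the corking model behind ip_append_data(): callers queue pieces on the socket and flush them later as a single datagram (or fragment train). A hypothetical caller in the style of udp_sendmsg() of this era, with ipc, rt and corkreq assumed to be set up as usual and ip_generic_getfrag as the stock copy-from-iovec callback:

	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
			     sizeof(struct udphdr), &ipc, rt,
			     corkreq ? msg->msg_flags | MSG_MORE
				     : msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);	  /* drop queued pieces */
	else if (!corkreq)
		err = ip_push_pending_frames(sk); /* emit the datagram */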
@@ -888,7 +888,7 @@ alloc_new_skb:
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
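To make the length bookkeeping concrete, with the usual Ethernet numbers: mtu = 1500 and a 20-byte IPv4 header give fragheaderlen = 20 and maxfraglen = ((1500 - 20) & ~7) + 20 = 1500 (payload per fragment must stay a multiple of 8). So datalen for a full middle fragment is 1480 and fraglen is 1500 again. The MSG_MORE test then allocates a full mtu-sized buffer when the device cannot do scatter-gather, so later appends can land in the same linear buffer instead of forcing a new skb.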
@@ -903,14 +903,14 @@ alloc_new_skb:
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
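Two allocators are used deliberately here. The first skb of a datagram (transhdrlen != 0, transport header still to be written) goes through sock_alloc_send_skb(), which may sleep for send-buffer space and honours MSG_DONTWAIT. Follow-up fragment skbs must not block mid-packet, so they are charged with sock_wmalloc(), and only while sk_wmem_alloc stays within twice sk_sndbuf; otherwise skb stays NULL and the error path just past this hunk reports -ENOBUFS.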
@@ -971,7 +971,7 @@ alloc_new_skb:
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
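This is the getfrag() contract in action: the callback must copy exactly copy bytes into the tail that skb_put() just reserved, folding them into the skb checksum (off is the byte offset used for odd/even checksum alignment), and return a negative value on failure. On failure the tail is rolled back with __skb_trim() so the skb stays consistent, and -EFAULT is propagated to the caller.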
@@ -993,7 +993,7 @@ alloc_new_skb:
					goto error;
				}
				get_page(page);
				skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
				frag = &skb_shinfo(skb)->frags[i];
			}
		} else if (i < MAX_SKB_FRAGS) {
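The page-fragment path pins its backing store: get_page() takes a reference before skb_fill_page_desc() points fragment i at the page (initially with length 0; the copy loop grows the fragment as data lands), and that reference is dropped again when the skb is freed. The invariant, condensed, with off standing in for the page offset:

	get_page(page);				  /* hold while the skb lives */
	skb_fill_page_desc(skb, i, page, off, 0); /* frag i -> page+off, len 0 */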
@@ -1033,7 +1033,7 @@ alloc_new_skb:
error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
@@ -1257,7 +1257,7 @@ int ip_push_pending_frames(struct sock *sk)
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets whole the not fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
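The comment in this hunk states the ordering guarantee: the NF_IP_LOCAL_OUT hook always sees the complete, unfragmented datagram, and fragmentation happens only afterwards, when dst_output() dispatches to the route's output method (ip_output(), which may in turn call ip_fragment()). In this era dst_output() is essentially a one-line indirection:

	static inline int dst_output(struct sk_buff *skb)
	{
		return skb->dst->output(skb);
	}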
@@ -1305,21 +1305,21 @@ void ip_flush_pending_frames(struct sock *sk)
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
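ip_reply_glue_bits() is the getfrag callback used by the reply path: it copies a block out of kernel memory and folds its checksum into the running skb->csum, where odd marks blocks that start at an odd byte offset (a 16-bit ones'-complement sum must be byte-rotated before such a block can be added). In isolation, with hypothetical src/to/len/odd values:

	__wsum part = csum_partial_copy_nocheck(src, to, len, 0);
	skb->csum = csum_block_add(skb->csum, part, odd);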
@@ -1357,7 +1357,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
			    /* Not quite clean, but right. */
			    .uli_u = { .ports =
				       { .sport = skb->h.th->dest,
					 .dport = skb->h.th->source } },
			    .proto = sk->sk_protocol };
	security_skb_classify_flow(skb, &fl);
	if (ip_route_output_key(&rt, &fl))