120 files changed, 1779 insertions, 1779 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5750a2b2a0d6..cf358c84c440 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -550,7 +550,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
if (err < 0)
goto out;

sock->state = SS_CONNECTING;

/* Just entered SS_CONNECTING state; the only
* difference is that return value in non-blocking
@@ -878,36 +878,36 @@ static struct net_proto_family inet_family_ops = {
*/
static struct inet_protosw inetsw_array[] =
{
{
.type = SOCK_STREAM,
.protocol = IPPROTO_TCP,
.prot = &tcp_prot,
.ops = &inet_stream_ops,
.capability = -1,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
},

{
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udp_prot,
.ops = &inet_dgram_ops,
.capability = -1,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
},


{
.type = SOCK_RAW,
.protocol = IPPROTO_IP, /* wild card */
.prot = &raw_prot,
.ops = &inet_sockraw_ops,
.capability = CAP_NET_RAW,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
}
};

@@ -946,7 +946,7 @@ void inet_register_protosw(struct inet_protosw *p)
/* Add the new entry after the last permanent entry if any, so that
* the new entry does not override a permanent entry when matched with
* a wild-card protocol. But it is allowed to override any existing
* non-permanent entry. This means that when we remove this entry, the
* system automatically returns to the old behavior.
*/
list_add_rcu(&p->list, last_perm);
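
The ordering rule described in this comment amounts to scanning past every permanent entry before linking the new one, so a front-to-back wildcard lookup still hits permanent entries first. A minimal stand-alone sketch of that idea, using a plain singly linked list with illustrative names rather than the kernel's RCU list primitives:

    struct sw_entry {
            struct sw_entry *next;
            int permanent;          /* stand-in for INET_PROTOSW_PERMANENT */
    };

    /* Find the link pointer just past the last permanent entry; a new entry
     * is spliced in there, so it can shadow non-permanent entries but never
     * a permanent one. */
    static struct sw_entry **insert_point(struct sw_entry **head)
    {
            struct sw_entry **pos = head;

            while (*pos && (*pos)->permanent)
                    pos = &(*pos)->next;
            return pos;
    }

    static void register_entry(struct sw_entry **head, struct sw_entry *new)
    {
            struct sw_entry **pos = insert_point(head);

            new->next = *pos;
            *pos = new;
    }

Removing such an entry later simply unlinks it, which is why the comment can say the system "automatically returns to the old behavior".
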
@@ -1073,7 +1073,7 @@ int inet_sk_rebuild_header(struct sock *sk)
},
},
};

security_sk_classify_flow(sk, &fl);
err = ip_route_output_flow(&rt, &fl, sk, 0);
}
@@ -1273,10 +1273,10 @@ static int __init inet_init(void)
goto out_unregister_udp_proto;

/*
* Tell SOCKET that we are alive...
*/

(void)sock_register(&inet_family_ops);

/*
* Add all the base protocols.
@@ -1306,9 +1306,9 @@ static int __init inet_init(void)

arp_init();

/*
* Set the IP module up
*/

ip_init();

@@ -1334,11 +1334,11 @@ static int __init inet_init(void)
#endif
/*
* Initialise per-cpu ipv4 mibs
*/

if(init_ipv4_mibs())
printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); ;

ipv4_proc_init();

ipfrag_init();
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 67a5509e26fc..7194eb40b6d0 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -91,7 +91,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->check = 0;

ahp = x->data;
ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
ahp->icv_trunc_len) >> 2) - 2;

ah->reserved = 0;
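
For reference, a worked instance of the hdrlen encoding in ah_output() above, assuming a 12-byte fixed AH header and a 96-bit (12-byte) truncated ICV such as HMAC-SHA1-96; the concrete figures are illustrative, not taken from this patch:

    XFRM_ALIGN8(12 + 12) = 24   /* AH length in bytes, padded to 8    */
    24 >> 2 = 6                 /* the same length in 32-bit words    */
    6 - 2 = 4                   /* RFC 4302 stores "words minus 2"    */

So ah->hdrlen comes out as 4 for a 24-byte authentication header, and the receive path's (ah->hdrlen + 2) << 2 in ah_input() recovers the 24 bytes.
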
@@ -135,9 +135,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
ah = (struct ip_auth_hdr*)skb->data;
ahp = x->data;
ah_hlen = (ah->hdrlen + 2) << 2;

if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) &&
ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len))
goto out;

if (!pskb_may_pull(skb, ah_hlen))
@@ -166,9 +166,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
if (ip_clear_mutable_options(iph, &dummy))
goto out;
}
{
u8 auth_data[MAX_AH_AUTH_LEN];

memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
skb_push(skb, ihl);
err = ah_mac_digest(ahp, skb, ah->auth_data);
@@ -237,7 +237,7 @@ static int ah_init_state(struct xfrm_state *x)
ahp->tfm = tfm;
if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
goto error;

/*
* Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
@@ -254,16 +254,16 @@ static int ah_init_state(struct xfrm_state *x)
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}

ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
if (!ahp->work_icv)
goto error;

x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len);
if (x->props.mode == XFRM_MODE_TUNNEL)
x->props.header_len += sizeof(struct iphdr);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 3981e8be9ab8..a58afde4f72f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -15,9 +15,9 @@
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Alan Cox : Removed the Ethernet assumptions in
* Florian's code
* Alan Cox : Fixed some small errors in the ARP
* logic
* Alan Cox : Allow >4K in /proc
* Alan Cox : Make ARP add its own protocol entry
@@ -39,18 +39,18 @@
* Jonathan Naylor : Only lookup the hardware address for
* the correct hardware type.
* Germano Caronni : Assorted subtle races.
* Craig Schlenter : Don't modify permanent entry
* during arp_rcv.
* Russ Nelson : Tidied up a few bits.
* Alexey Kuznetsov: Major changes to caching and behaviour,
* eg intelligent arp probing and
* generation
* of host down events.
* Alan Cox : Missing unlock in device events.
* Eckes : ARP ioctl control errors.
* Alexey Kuznetsov: Arp free fix.
* Manuel Rodriguez: Gratuitous ARP.
* Jonathan Layes : Added arpd support through kerneld
* message queue (960314)
* Mike Shaver : /proc/sys/net/ipv4/arp_* support
* Mike McLagan : Routing by source
@@ -210,7 +210,7 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
case ARPHRD_FDDI:
case ARPHRD_IEEE802:
ip_eth_mc_map(addr, haddr);
return 0;
case ARPHRD_IEEE802_TR:
ip_tr_mc_map(addr, haddr);
return 0;
@@ -288,7 +288,7 @@ static int arp_constructor(struct neighbour *neigh)
switch (dev->type) {
default:
break;
case ARPHRD_ROSE:
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
case ARPHRD_AX25:
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
@@ -425,18 +425,18 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
.saddr = tip } } };
struct rtable *rt;
int flag = 0;
/*unsigned long now; */

if (ip_route_output_key(&rt, &fl) < 0)
return 1;
if (rt->u.dst.dev != dev) {
NET_INC_STATS_BH(LINUX_MIB_ARPFILTER);
flag = 1;
}
ip_rt_put(rt);
return flag;
}

/* OBSOLETE FUNCTIONS */

@@ -490,7 +490,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
n->used = jiffies;
if (n->nud_state&NUD_VALID || neigh_event_send(n, skb) == 0) {
read_lock_bh(&n->lock);
memcpy(haddr, n->ha, dev->addr_len);
read_unlock_bh(&n->lock);
neigh_release(n);
return 0;
@@ -572,7 +572,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
/*
* Allocate a buffer
*/

skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
+ LL_RESERVED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
@@ -685,7 +685,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
/*
* No arp on this interface.
*/

if (dev->flags&IFF_NOARP)
return;

@@ -725,7 +725,7 @@ static int arp_process(struct sk_buff *skb)
arp = skb->nh.arph;

switch (dev_type) {
default:
if (arp->ar_pro != htons(ETH_P_IP) ||
htons(dev_type) != arp->ar_hrd)
goto out;
@@ -792,7 +792,7 @@ static int arp_process(struct sk_buff *skb)
tha = arp_ptr;
arp_ptr += dev->addr_len;
memcpy(&tip, arp_ptr, 4);
/*
* Check for bad requests for 127.x.x.x and requests for multicast
* addresses. If this is one such, delete it.
*/
@@ -809,16 +809,16 @@ static int arp_process(struct sk_buff *skb)
* Process entry. The idea here is we want to send a reply if it is a
* request for us or if it is a request for someone else that we hold
* a proxy for. We want to add an entry to our cache if it is a reply
* to us or if it is a request for our address.
* (The assumption for this last is that if someone is requesting our
* address, they are probably intending to talk to us, so it saves time
* if we cache their address. Their address is also probably not in
* our cache, since ours is not in their cache.)
*
* Putting this another way, we only care about replies if they are to
* us, in which case we add them to the cache. For requests, we care
* about those for us and those for our proxies. We reply to both,
* and in the case of requests for us we add the requester to the arp
* cache.
*/

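The policy spelled out in that comment reduces to a small decision table: requests for us or for a proxied host get a reply; the sender is cached when the packet is a request for our address or a reply addressed to us. A stand-alone sketch with illustrative flag names, not the kernel's arp_process variables:

    #include <stdbool.h>

    struct arp_action {
            bool send_reply;        /* answer the request                */
            bool cache_sender;      /* remember the sender's IP/MAC pair */
    };

    static struct arp_action classify(bool is_request, bool for_us, bool for_proxy)
    {
            struct arp_action act = { false, false };

            if (is_request) {
                    act.send_reply = for_us || for_proxy;
                    /* the requester will probably talk to us next */
                    act.cache_sender = for_us;
            } else {
                    /* replies only matter when they are addressed to us */
                    act.cache_sender = for_us;
            }
            return act;
    }
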
@@ -845,7 +845,7 @@ static int arp_process(struct sk_buff *skb)
if (!dont_send)
dont_send |= arp_ignore(in_dev,dev,sip,tip);
if (!dont_send && IN_DEV_ARPFILTER(in_dev))
dont_send |= arp_filter(sip,tip,dev);
if (!dont_send)
arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);

@@ -860,7 +860,7 @@ static int arp_process(struct sk_buff *skb)
if (n)
neigh_release(n);

if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
skb->pkt_type == PACKET_HOST ||
in_dev->arp_parms->proxy_delay == 0) {
arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
@@ -1039,7 +1039,7 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev)
if (r->arp_flags & ATF_PERM)
state = NUD_PERMANENT;
err = neigh_update(neigh, (r->arp_flags&ATF_COM) ?
r->arp_ha.sa_data : NULL, state,
NEIGH_UPDATE_F_OVERRIDE|
NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
@@ -1121,7 +1121,7 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev)
neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
if (neigh->nud_state&~NUD_NOARP)
err = neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE|
NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
@@ -1181,7 +1181,7 @@ int arp_ioctl(unsigned int cmd, void __user *arg)

switch(cmd) {
case SIOCDARP:
err = arp_req_delete(&r, dev);
break;
case SIOCSARP:
err = arp_req_set(&r, dev);
@@ -1268,14 +1268,14 @@ static char *ax2asc2(ax25_address *a, char *buf)

if (c != ' ') *s++ = c;
}

*s++ = '-';

if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
*s++ = '1';
n -= 10;
}

*s++ = n + '0';
*s++ = '\0';

@@ -1373,7 +1373,7 @@ static int arp_seq_open(struct inode *inode, struct file *file)
struct seq_file *seq;
int rc = -ENOMEM;
struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

if (!s)
goto out;

diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 0072d79f0c2a..dd02a45d0f67 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -29,12 +29,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
int oif;
int err;


if (addr_len < sizeof(*usin))
return -EINVAL;

if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;

sk_dst_reset(sk);

@@ -56,8 +56,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
ip_rt_put(rt);
return -EACCES;
}
if (!inet->saddr)
inet->saddr = rt->rt_src; /* Update source address */
if (!inet->rcv_saddr)
inet->rcv_saddr = rt->rt_src;
inet->daddr = rt->rt_dst;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index c40203640966..ba5e7f4cd127 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -252,7 +252,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,

ASSERT_RTNL();

/* 1. Deleting primary ifaddr forces deletion all secondaries
* unless alias promotion is set
**/

@@ -260,7 +260,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
struct in_ifaddr **ifap1 = &ifa1->ifa_next;

while ((ifa = *ifap1) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;

@@ -583,8 +583,8 @@ static __inline__ int inet_abc_len(__be32 addr)
{
int rc = -1; /* Something else, probably a multicast. */

if (ZERONET(addr))
rc = 0;
else {
__u32 haddr = ntohl(addr);

@@ -596,7 +596,7 @@ static __inline__ int inet_abc_len(__be32 addr)
rc = 24;
}

return rc;
}


@@ -1020,29 +1020,29 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
* alias numbering and to create unique labels if possible.
*/
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{
struct in_ifaddr *ifa;
int named = 0;

for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
char old[IFNAMSIZ], *dot;

memcpy(old, ifa->ifa_label, IFNAMSIZ);
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
if (named++ == 0)
continue;
dot = strchr(ifa->ifa_label, ':');
if (dot == NULL) {
sprintf(old, ":%d", named);
dot = old;
}
if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
strcat(ifa->ifa_label, dot);
} else {
strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
}
}
}

/* Called only under RTNL semaphore */

@@ -1539,7 +1539,7 @@ static struct devinet_sysctl_table {
},
},
.devinet_conf_dir = {
{
.ctl_name = NET_IPV4_CONF,
.procname = "conf",
.mode = 0555,
@@ -1581,18 +1581,18 @@ static void devinet_sysctl_register(struct in_device *in_dev,
}

if (dev) {
dev_name = dev->name;
t->devinet_dev[0].ctl_name = dev->ifindex;
} else {
dev_name = "default";
t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
}

/*
* Make a copy of dev_name, because '.procname' is regarded as const
* by sysctl and we wouldn't want anyone to change it under our feet
* (see SIOCSIFNAME).
*/
dev_name = kstrdup(dev_name, GFP_KERNEL);
if (!dev_name)
goto free;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index f2c6776ea0e6..31041127eeb8 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -215,7 +215,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
if (padlen+2 >= elen)
goto out;

/* ... check padding bits here. Silly. :-) */

iph = skb->nh.iph;
ihl = iph->ihl * 4;
@@ -236,7 +236,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)

ipaddr.a4 = iph->saddr;
km_new_mapping(x, &ipaddr, uh->source);

/* XXX: perhaps add an extra
* policy check here, to see
* if we should allow or
@@ -245,7 +245,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
* address/port.
*/
}

/*
* 2) ignore UDP/TCP checksums in case
* of NAT-T in Transport Mode, or
@@ -284,7 +284,7 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
mtu = ALIGN(mtu + 2, 4) + blksize - 4;
break;
case XFRM_MODE_BEET:
/* The worst case. */
enclen = IPV4_BEET_PHMAXLEN;
mtu = ALIGN(mtu + enclen + 2, blksize);
break;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index d47b72af89ed..64f31e63db7f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -160,7 +160,7 @@ unsigned inet_addr_type(__be32 addr)
#ifdef CONFIG_IP_MULTIPLE_TABLES
res.r = NULL;
#endif

if (ip_fib_local_table) {
ret = RTN_UNICAST;
if (!ip_fib_local_table->tb_lookup(ip_fib_local_table,
@@ -378,7 +378,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
int len = 0;

mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
if (mx == NULL)
return -ENOMEM;

if (rt->rt_flags & RTF_MTU)
@@ -400,7 +400,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt,
/*
* Handle IP routing ioctl calls. These are used to manipulate the routing tables
*/

int ip_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct fib_config cfg;
@@ -600,7 +600,7 @@ int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
if (tb->tb_dump(tb, skb, cb) < 0)
goto out;
dumped = 1;
@@ -766,7 +766,7 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)

static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
{

struct fib_result res;
struct flowi fl = { .mark = frn->fl_mark,
.nl_u = { .ip4_u = { .daddr = frn->fl_addr,
@@ -791,11 +791,11 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
static void nl_fib_input(struct sock *sk, int len)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh = NULL;
struct fib_result_nl *frn;
u32 pid;
struct fib_table *tb;

skb = skb_dequeue(&sk->sk_receive_queue);
nlh = (struct nlmsghdr *)skb->data;
if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
@@ -803,17 +803,17 @@ static void nl_fib_input(struct sock *sk, int len)
kfree_skb(skb);
return;
}

frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
tb = fib_get_table(frn->tb_id_in);

nl_fib_lookup(frn, tb);

pid = nlh->nlmsg_pid; /*pid of sending process */
NETLINK_CB(skb).pid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
}

static void nl_fib_lookup_init(void)
{
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 648f47c1c399..dea04d725b04 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -146,7 +146,7 @@ static void fn_rehash_zone(struct fn_zone *fz)
struct hlist_head *ht, *old_ht;
int old_divisor, new_divisor;
u32 new_hashmask;

old_divisor = fz->fz_divisor;

switch (old_divisor) {
@@ -911,7 +911,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)

if (!iter->zone)
goto out;

iter->bucket = 0;
iter->hash_head = iter->zone->fz_hash;

@@ -932,7 +932,7 @@ static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_iter_state *iter = seq->private;
struct fib_alias *fa;

if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
fa = iter->fa;
pos -= iter->pos;
@@ -981,7 +981,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
return flags;
}

/*
* This outputs /proc/net/route.
*
* It always works in backward compatibility mode.
@@ -1040,7 +1040,7 @@ static int fib_seq_open(struct inode *inode, struct file *file)
struct seq_file *seq;
int rc = -ENOMEM;
struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

if (!s)
goto out;

diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index be1028c9933e..2f1fdae6efa6 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -85,12 +85,12 @@ for (nhsel=0; nhsel < 1; nhsel++)
#define endfor_nexthops(fi) }


static const struct
{
int error;
u8 scope;
} fib_props[RTA_MAX + 1] = {
{
.error = 0,
.scope = RT_SCOPE_NOWHERE,
}, /* RTN_UNSPEC */
@@ -439,7 +439,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)

rtnh = cfg->fc_mp;
remaining = cfg->fc_mp_len;

for_nexthops(fi) {
int attrlen;

@@ -508,9 +508,9 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
Normally it looks as following.

{universe prefix} -> (gw, oif) [scope link]
|
|-> {link prefix} -> (gw, oif) [scope local]
|
|-> {local prefix} (terminal node)
*/

@@ -864,7 +864,7 @@ err_inval:
err = -EINVAL;

failure:
if (fi) {
fi->fib_dead = 1;
free_fib_info(fi);
}
@@ -1049,7 +1049,7 @@ int fib_sync_down(__be32 local, struct net_device *dev, int force)
{
int ret = 0;
int scope = RT_SCOPE_NOWHERE;

if (force)
scope = -1;

diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1e589b91605e..004a437bd7b5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -7,13 +7,13 @@
* Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
* & Swedish University of Agricultural Sciences.
*
* Jens Laas <jens.laas@data.slu.se> Swedish University of
* Agricultural Sciences.
*
* Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
*
* This work is based on the LPC-trie which is originally descibed in:
*
* An experimental study of compression methods for dynamic tries
* Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
* http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
@@ -224,34 +224,34 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
}

/*
To understand this stuff, an understanding of keys and all their bits is
necessary. Every node in the trie has a key associated with it, but not
all of the bits in that key are significant.

Consider a node 'n' and its parent 'tp'.

If n is a leaf, every bit in its key is significant. Its presence is
necessitated by path compression, since during a tree traversal (when
searching for a leaf - unless we are doing an insertion) we will completely
ignore all skipped bits we encounter. Thus we need to verify, at the end of
a potentially successful search, that we have indeed been walking the
correct key path.

Note that we can never "miss" the correct key in the tree if present by
following the wrong path. Path compression ensures that segments of the key
that are the same for all keys with a given prefix are skipped, but the
skipped part *is* identical for each node in the subtrie below the skipped
bit! trie_insert() in this implementation takes care of that - note the
call to tkey_sub_equals() in trie_insert().

if n is an internal node - a 'tnode' here, the various parts of its key
have many different meanings.

Example:
_________________________________________________________________
| i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
-----------------------------------------------------------------
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15

_________________________________________________________________
| C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
@@ -263,23 +263,23 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
n->pos = 15
n->bits = 4

First, let's just ignore the bits that come before the parent tp, that is
the bits from 0 to (tp->pos-1). They are *known* but at this point we do
not use them for anything.

The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
index into the parent's child array. That is, they will be used to find
'n' among tp's children.

The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
for the node n.

All the bits we have seen so far are significant to the node n. The rest
of the bits are really not needed or indeed known in n->key.

The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
n's child array, and will of course be different for each child.


The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
at this point.
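
In code, the "N" and "C" fields described above are pulled out of a 32-bit key by shifting away the bits that ancestors have already consumed and keeping the next pos/bits window. A minimal sketch of that extraction, with an illustrative helper name rather than the exact fib_trie routine:

    #include <stdint.h>

    #define KEYLENGTH 32    /* IPv4 keys are 32 bits wide */

    /* Index of a child: drop 'offset' already-consumed bits, keep 'bits' bits. */
    static inline uint32_t child_index(uint32_t key, int offset, int bits)
    {
            if (bits == 0 || offset >= KEYLENGTH)
                    return 0;
            return (key << offset) >> (KEYLENGTH - bits);
    }

With the layout sketched above, where the "N" field starts at bit 7 and is 3 bits wide, child_index(key, 7, 3) yields the index that selects n within tp's child array.
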
@@ -294,7 +294,7 @@ static inline void check_tnode(const struct tnode *tn)
static int halve_threshold = 25;
static int inflate_threshold = 50;
static int halve_threshold_root = 15;
static int inflate_threshold_root = 25;


static void __alias_free_mem(struct rcu_head *head)
@@ -355,7 +355,7 @@ static inline void tnode_free(struct tnode *tn)
struct leaf *l = (struct leaf *) tn;
call_rcu_bh(&l->rcu, __leaf_free_rcu);
}
else
call_rcu(&tn->rcu, __tnode_free_rcu);
}

@@ -461,7 +461,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
int inflate_threshold_use;
int halve_threshold_use;

if (!tn)
return NULL;

pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
@@ -556,7 +556,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)

if(!tn->parent)
inflate_threshold_use = inflate_threshold_root;
else
inflate_threshold_use = inflate_threshold;

err = 0;
@@ -587,7 +587,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)

if(!tn->parent)
halve_threshold_use = halve_threshold_root;
else
halve_threshold_use = halve_threshold;

err = 0;
@@ -665,10 +665,10 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
right = tnode_new(inode->key|m, inode->pos + 1,
inode->bits - 1);

if (!right) {
tnode_free(left);
goto nomem;
}

put_child(t, tn, 2*i, (struct node *) left);
put_child(t, tn, 2*i+1, (struct node *) right);
@@ -890,23 +890,23 @@ static inline struct list_head * get_fa_head(struct leaf *l, int plen)

static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
{
struct leaf_info *li = NULL, *last = NULL;
struct hlist_node *node;

if (hlist_empty(head)) {
hlist_add_head_rcu(&new->hlist, head);
} else {
hlist_for_each_entry(li, node, head, hlist) {
if (new->plen > li->plen)
break;

last = li;
}
if (last)
hlist_add_after_rcu(&last->hlist, &new->hlist);
else
hlist_add_before_rcu(&new->hlist, &li->hlist);
}
}

/* rcu_read_lock needs to be hold by caller from readside */
@@ -1700,7 +1700,7 @@ static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
/* Decend if tnode */
while (IS_TNODE(c)) {
p = (struct tnode *) c;
idx = 0;

/* Rightmost non-NULL branch */
if (p && IS_TNODE(p))
@@ -2303,9 +2303,9 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)

seq_indent(seq, iter->depth-1);
seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
tn->empty_children);

} else {
struct leaf *l = (struct leaf *) n;
int i;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 40cf0d0e1b83..4b7a0d946a0d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -304,7 +304,7 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)

/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
goto out;

/* Limit if icmp type is enabled in ratemask. */
if ((1 << type) & sysctl_icmp_ratemask)
@@ -350,9 +350,9 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
struct sk_buff *skb;

if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT) < 0)
ip_flush_pending_frames(icmp_socket->sk);
else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = skb->h.icmph;
@@ -755,7 +755,7 @@ static void icmp_redirect(struct sk_buff *skb)
skb->h.icmph->un.gateway,
iph->saddr, skb->dev);
break;
}
out:
return;
out_err:
@@ -959,7 +959,7 @@ int icmp_rcv(struct sk_buff *skb)
* Parse the ICMP message
*/

if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
/*
* RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
* silently ignored (we let user decide with a sysctl).
@@ -976,7 +976,7 @@ int icmp_rcv(struct sk_buff *skb)
icmph->type != ICMP_ADDRESS &&
icmph->type != ICMP_ADDRESSREPLY) {
goto error;
}
}

ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry);
@@ -1085,7 +1085,7 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
.input_entry = ICMP_MIB_DUMMY,
.handler = icmp_discard,
},
[ICMP_INFO_REPLY] = {
.output_entry = ICMP_MIB_DUMMY,
.input_entry = ICMP_MIB_DUMMY,
.handler = icmp_discard,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 024ae56cab25..b8e1625d34cf 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -35,7 +35,7 @@ | |||
35 | * | 35 | * |
36 | * Chih-Jen Chang : Tried to revise IGMP to Version 2 | 36 | * Chih-Jen Chang : Tried to revise IGMP to Version 2 |
37 | * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu | 37 | * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu |
38 | * The enhancements are mainly based on Steve Deering's | 38 | * The enhancements are mainly based on Steve Deering's |
39 | * ipmulti-3.5 source code. | 39 | * ipmulti-3.5 source code. |
40 | * Chih-Jen Chang : Added the igmp_get_mrouter_info and | 40 | * Chih-Jen Chang : Added the igmp_get_mrouter_info and |
41 | * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of | 41 | * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of |
@@ -49,11 +49,11 @@ | |||
49 | * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. | 49 | * Alan Cox : Stop IGMP from 0.0.0.0 being accepted. |
50 | * Alan Cox : Use GFP_ATOMIC in the right places. | 50 | * Alan Cox : Use GFP_ATOMIC in the right places. |
51 | * Christian Daudt : igmp timer wasn't set for local group | 51 | * Christian Daudt : igmp timer wasn't set for local group |
52 | * memberships but was being deleted, | 52 | * memberships but was being deleted, |
53 | * which caused a "del_timer() called | 53 | * which caused a "del_timer() called |
54 | * from %p with timer not initialized\n" | 54 | * from %p with timer not initialized\n" |
55 | * message (960131). | 55 | * message (960131). |
56 | * Christian Daudt : removed del_timer from | 56 | * Christian Daudt : removed del_timer from |
57 | * igmp_timer_expire function (960205). | 57 | * igmp_timer_expire function (960205). |
58 | * Christian Daudt : igmp_heard_report now only calls | 58 | * Christian Daudt : igmp_heard_report now only calls |
59 | * igmp_timer_expire if tm->running is | 59 | * igmp_timer_expire if tm->running is |
@@ -718,7 +718,7 @@ static void igmp_ifc_event(struct in_device *in_dev) | |||
718 | { | 718 | { |
719 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) | 719 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) |
720 | return; | 720 | return; |
721 | in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : | 721 | in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : |
722 | IGMP_Unsolicited_Report_Count; | 722 | IGMP_Unsolicited_Report_Count; |
723 | igmp_ifc_start_timer(in_dev, 1); | 723 | igmp_ifc_start_timer(in_dev, 1); |
724 | } | 724 | } |
@@ -838,7 +838,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
838 | if (len == 8) { | 838 | if (len == 8) { |
839 | if (ih->code == 0) { | 839 | if (ih->code == 0) { |
840 | /* Alas, old v1 router presents here. */ | 840 | /* Alas, old v1 router presents here. */ |
841 | 841 | ||
842 | max_delay = IGMP_Query_Response_Interval; | 842 | max_delay = IGMP_Query_Response_Interval; |
843 | in_dev->mr_v1_seen = jiffies + | 843 | in_dev->mr_v1_seen = jiffies + |
844 | IGMP_V1_Router_Present_Timeout; | 844 | IGMP_V1_Router_Present_Timeout; |
@@ -860,10 +860,10 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
860 | } else { /* v3 */ | 860 | } else { /* v3 */ |
861 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) | 861 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) |
862 | return; | 862 | return; |
863 | 863 | ||
864 | ih3 = (struct igmpv3_query *) skb->h.raw; | 864 | ih3 = (struct igmpv3_query *) skb->h.raw; |
865 | if (ih3->nsrcs) { | 865 | if (ih3->nsrcs) { |
866 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) | 866 | if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) |
867 | + ntohs(ih3->nsrcs)*sizeof(__be32))) | 867 | + ntohs(ih3->nsrcs)*sizeof(__be32))) |
868 | return; | 868 | return; |
869 | ih3 = (struct igmpv3_query *) skb->h.raw; | 869 | ih3 = (struct igmpv3_query *) skb->h.raw; |
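Editor's note: in the v3 branch above the code pulls the fixed igmpv3_query header first, reads nsrcs from it, and only then pulls (and re-reads the header pointer) for the nsrcs * sizeof(__be32) source addresses before touching them. A userspace sketch of that validate-before-indexing pattern; the struct below is a simplified, illustrative layout, not the real igmpv3_query.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs, ntohl, htons, htonl */

/* Simplified fixed part of an IGMPv3-style query (illustrative only). */
struct v3_query {
    uint8_t  type;
    uint8_t  code;
    uint16_t csum;
    uint32_t group;
    uint8_t  misc;
    uint8_t  qqic;
    uint16_t nsrcs;        /* number of 32-bit source addresses that follow */
};

static int parse_query(const uint8_t *buf, size_t len)
{
    struct v3_query q;
    size_t need;
    uint16_t nsrcs;

    if (len < sizeof(q))
        return -1;                      /* too short for the fixed header */
    memcpy(&q, buf, sizeof(q));
    nsrcs = ntohs(q.nsrcs);

    need = sizeof(q) + (size_t)nsrcs * 4;
    if (len < need)
        return -1;                      /* source list truncated */

    for (uint16_t i = 0; i < nsrcs; i++) {
        uint32_t src;
        memcpy(&src, buf + sizeof(q) + 4u * i, 4);
        printf("src[%u] = 0x%08x\n", (unsigned)i, (unsigned)ntohl(src));
    }
    return 0;
}

int main(void)
{
    uint8_t pkt[sizeof(struct v3_query) + 4] = {0};
    struct v3_query q = { .type = 0x11, .nsrcs = htons(1) };
    uint32_t src = htonl(0xc0a80001);

    memcpy(pkt, &q, sizeof(q));
    memcpy(pkt + sizeof(q), &src, 4);
    printf("full packet: %d\n", parse_query(pkt, sizeof(pkt)));
    printf("truncated:   %d\n", parse_query(pkt, sizeof(pkt) - 2));
    return 0;
}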
@@ -909,7 +909,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
909 | else | 909 | else |
910 | im->gsquery = mark; | 910 | im->gsquery = mark; |
911 | changed = !im->gsquery || | 911 | changed = !im->gsquery || |
912 | igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs); | 912 | igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs); |
913 | spin_unlock_bh(&im->lock); | 913 | spin_unlock_bh(&im->lock); |
914 | if (changed) | 914 | if (changed) |
915 | igmp_mod_timer(im, max_delay); | 915 | igmp_mod_timer(im, max_delay); |
@@ -1257,9 +1257,9 @@ out: | |||
1257 | void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) | 1257 | void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) |
1258 | { | 1258 | { |
1259 | struct ip_mc_list *i, **ip; | 1259 | struct ip_mc_list *i, **ip; |
1260 | 1260 | ||
1261 | ASSERT_RTNL(); | 1261 | ASSERT_RTNL(); |
1262 | 1262 | ||
1263 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { | 1263 | for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) { |
1264 | if (i->multiaddr==addr) { | 1264 | if (i->multiaddr==addr) { |
1265 | if (--i->users == 0) { | 1265 | if (--i->users == 0) { |
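Editor's note: ip_mc_dec_group() above walks the interface's multicast list with a pointer-to-pointer cursor (ip = &in_dev->mc_list; i = *ip; ip = &i->next), which lets the matching entry be unlinked without tracking a separate previous pointer. The same idiom in a standalone sketch:

#include <stdio.h>
#include <stdlib.h>

struct node {
    int value;
    struct node *next;
};

/* Remove the first node with the given value.  *pp always points at the
 * link (head pointer or a ->next field) that refers to the node being
 * examined, so unlinking is just "*pp = n->next". */
static int list_del_value(struct node **pp, int value)
{
    struct node *n;

    for (; (n = *pp) != NULL; pp = &n->next) {
        if (n->value == value) {
            *pp = n->next;
            free(n);
            return 1;
        }
    }
    return 0;
}

static struct node *push(struct node *head, int value)
{
    struct node *n = malloc(sizeof(*n));
    n->value = value;
    n->next = head;
    return n;
}

int main(void)
{
    struct node *head = NULL;

    head = push(head, 3);
    head = push(head, 2);
    head = push(head, 1);

    list_del_value(&head, 2);
    for (struct node *n = head; n; n = n->next)
        printf("%d\n", n->value);        /* prints 1 then 3 */

    while (head)
        list_del_value(&head, head->value);
    return 0;
}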
@@ -1436,7 +1436,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, | |||
1436 | #ifdef CONFIG_IP_MULTICAST | 1436 | #ifdef CONFIG_IP_MULTICAST |
1437 | if (psf->sf_oldin && | 1437 | if (psf->sf_oldin && |
1438 | !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { | 1438 | !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { |
1439 | psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : | 1439 | psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : |
1440 | IGMP_Unsolicited_Report_Count; | 1440 | IGMP_Unsolicited_Report_Count; |
1441 | psf->sf_next = pmc->tomb; | 1441 | psf->sf_next = pmc->tomb; |
1442 | pmc->tomb = psf; | 1442 | pmc->tomb = psf; |
@@ -1500,7 +1500,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, | |||
1500 | /* filter mode change */ | 1500 | /* filter mode change */ |
1501 | pmc->sfmode = MCAST_INCLUDE; | 1501 | pmc->sfmode = MCAST_INCLUDE; |
1502 | #ifdef CONFIG_IP_MULTICAST | 1502 | #ifdef CONFIG_IP_MULTICAST |
1503 | pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : | 1503 | pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : |
1504 | IGMP_Unsolicited_Report_Count; | 1504 | IGMP_Unsolicited_Report_Count; |
1505 | in_dev->mr_ifc_count = pmc->crcount; | 1505 | in_dev->mr_ifc_count = pmc->crcount; |
1506 | for (psf=pmc->sources; psf; psf = psf->sf_next) | 1506 | for (psf=pmc->sources; psf; psf = psf->sf_next) |
@@ -1679,7 +1679,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, | |||
1679 | #ifdef CONFIG_IP_MULTICAST | 1679 | #ifdef CONFIG_IP_MULTICAST |
1680 | /* else no filters; keep old mode for reports */ | 1680 | /* else no filters; keep old mode for reports */ |
1681 | 1681 | ||
1682 | pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : | 1682 | pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : |
1683 | IGMP_Unsolicited_Report_Count; | 1683 | IGMP_Unsolicited_Report_Count; |
1684 | in_dev->mr_ifc_count = pmc->crcount; | 1684 | in_dev->mr_ifc_count = pmc->crcount; |
1685 | for (psf=pmc->sources; psf; psf = psf->sf_next) | 1685 | for (psf=pmc->sources; psf; psf = psf->sf_next) |
@@ -1873,7 +1873,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct | |||
1873 | } else if (pmc->sfmode != omode) { | 1873 | } else if (pmc->sfmode != omode) { |
1874 | /* allow mode switches for empty-set filters */ | 1874 | /* allow mode switches for empty-set filters */ |
1875 | ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0); | 1875 | ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0); |
1876 | ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0, | 1876 | ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0, |
1877 | NULL, 0); | 1877 | NULL, 0); |
1878 | pmc->sfmode = omode; | 1878 | pmc->sfmode = omode; |
1879 | } | 1879 | } |
@@ -1899,7 +1899,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct | |||
1899 | } | 1899 | } |
1900 | 1900 | ||
1901 | /* update the interface filter */ | 1901 | /* update the interface filter */ |
1902 | ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1, | 1902 | ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1, |
1903 | &mreqs->imr_sourceaddr, 1); | 1903 | &mreqs->imr_sourceaddr, 1); |
1904 | 1904 | ||
1905 | for (j=i+1; j<psl->sl_count; j++) | 1905 | for (j=i+1; j<psl->sl_count; j++) |
@@ -1949,7 +1949,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct | |||
1949 | psl->sl_count++; | 1949 | psl->sl_count++; |
1950 | err = 0; | 1950 | err = 0; |
1951 | /* update the interface list */ | 1951 | /* update the interface list */ |
1952 | ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1, | 1952 | ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1, |
1953 | &mreqs->imr_sourceaddr, 1); | 1953 | &mreqs->imr_sourceaddr, 1); |
1954 | done: | 1954 | done: |
1955 | rtnl_unlock(); | 1955 | rtnl_unlock(); |
@@ -2264,7 +2264,7 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq) | |||
2264 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); | 2264 | struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq); |
2265 | 2265 | ||
2266 | for (state->dev = dev_base, state->in_dev = NULL; | 2266 | for (state->dev = dev_base, state->in_dev = NULL; |
2267 | state->dev; | 2267 | state->dev; |
2268 | state->dev = state->dev->next) { | 2268 | state->dev = state->dev->next) { |
2269 | struct in_device *in_dev; | 2269 | struct in_device *in_dev; |
2270 | in_dev = in_dev_get(state->dev); | 2270 | in_dev = in_dev_get(state->dev); |
@@ -2346,7 +2346,7 @@ static void igmp_mc_seq_stop(struct seq_file *seq, void *v) | |||
2346 | static int igmp_mc_seq_show(struct seq_file *seq, void *v) | 2346 | static int igmp_mc_seq_show(struct seq_file *seq, void *v) |
2347 | { | 2347 | { |
2348 | if (v == SEQ_START_TOKEN) | 2348 | if (v == SEQ_START_TOKEN) |
2349 | seq_puts(seq, | 2349 | seq_puts(seq, |
2350 | "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n"); | 2350 | "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n"); |
2351 | else { | 2351 | else { |
2352 | struct ip_mc_list *im = (struct ip_mc_list *)v; | 2352 | struct ip_mc_list *im = (struct ip_mc_list *)v; |
@@ -2426,7 +2426,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) | |||
2426 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); | 2426 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); |
2427 | 2427 | ||
2428 | for (state->dev = dev_base, state->idev = NULL, state->im = NULL; | 2428 | for (state->dev = dev_base, state->idev = NULL, state->im = NULL; |
2429 | state->dev; | 2429 | state->dev; |
2430 | state->dev = state->dev->next) { | 2430 | state->dev = state->dev->next) { |
2431 | struct in_device *idev; | 2431 | struct in_device *idev; |
2432 | idev = in_dev_get(state->dev); | 2432 | idev = in_dev_get(state->dev); |
@@ -2531,7 +2531,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v) | |||
2531 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); | 2531 | struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); |
2532 | 2532 | ||
2533 | if (v == SEQ_START_TOKEN) { | 2533 | if (v == SEQ_START_TOKEN) { |
2534 | seq_printf(seq, | 2534 | seq_printf(seq, |
2535 | "%3s %6s " | 2535 | "%3s %6s " |
2536 | "%10s %10s %6s %6s\n", "Idx", | 2536 | "%10s %10s %6s %6s\n", "Idx", |
2537 | "Device", "MCA", | 2537 | "Device", "MCA", |
@@ -2539,8 +2539,8 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v) | |||
2539 | } else { | 2539 | } else { |
2540 | seq_printf(seq, | 2540 | seq_printf(seq, |
2541 | "%3d %6.6s 0x%08x " | 2541 | "%3d %6.6s 0x%08x " |
2542 | "0x%08x %6lu %6lu\n", | 2542 | "0x%08x %6lu %6lu\n", |
2543 | state->dev->ifindex, state->dev->name, | 2543 | state->dev->ifindex, state->dev->name, |
2544 | ntohl(state->im->multiaddr), | 2544 | ntohl(state->im->multiaddr), |
2545 | ntohl(psf->sf_inaddr), | 2545 | ntohl(psf->sf_inaddr), |
2546 | psf->sf_count[MCAST_INCLUDE], | 2546 | psf->sf_count[MCAST_INCLUDE], |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 9d68837888d3..43fb1600f1f0 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -149,7 +149,7 @@ success: | |||
149 | if (!inet_csk(sk)->icsk_bind_hash) | 149 | if (!inet_csk(sk)->icsk_bind_hash) |
150 | inet_bind_hash(sk, tb, snum); | 150 | inet_bind_hash(sk, tb, snum); |
151 | BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); | 151 | BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); |
152 | ret = 0; | 152 | ret = 0; |
153 | 153 | ||
154 | fail_unlock: | 154 | fail_unlock: |
155 | spin_unlock(&head->lock); | 155 | spin_unlock(&head->lock); |
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(inet_csk_accept); | |||
255 | 255 | ||
256 | /* | 256 | /* |
257 | * Using different timers for retransmit, delayed acks and probes | 257 | * Using different timers for retransmit, delayed acks and probes |
258 | * We may wish use just one timer maintaining a list of expire jiffies | 258 | * We may wish use just one timer maintaining a list of expire jiffies |
259 | * to optimize. | 259 | * to optimize. |
260 | */ | 260 | */ |
261 | void inet_csk_init_xmit_timers(struct sock *sk, | 261 | void inet_csk_init_xmit_timers(struct sock *sk, |
@@ -273,7 +273,7 @@ void inet_csk_init_xmit_timers(struct sock *sk, | |||
273 | icsk->icsk_delack_timer.function = delack_handler; | 273 | icsk->icsk_delack_timer.function = delack_handler; |
274 | sk->sk_timer.function = keepalive_handler; | 274 | sk->sk_timer.function = keepalive_handler; |
275 | 275 | ||
276 | icsk->icsk_retransmit_timer.data = | 276 | icsk->icsk_retransmit_timer.data = |
277 | icsk->icsk_delack_timer.data = | 277 | icsk->icsk_delack_timer.data = |
278 | sk->sk_timer.data = (unsigned long)sk; | 278 | sk->sk_timer.data = (unsigned long)sk; |
279 | 279 | ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8aa7d51e6881..5df71cd08da8 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -381,7 +381,7 @@ static int inet_diag_bc_run(const void *bc, int len, | |||
381 | if (addr[0] == 0 && addr[1] == 0 && | 381 | if (addr[0] == 0 && addr[1] == 0 && |
382 | addr[2] == htonl(0xffff) && | 382 | addr[2] == htonl(0xffff) && |
383 | bitstring_match(addr + 3, cond->addr, | 383 | bitstring_match(addr + 3, cond->addr, |
384 | cond->prefix_len)) | 384 | cond->prefix_len)) |
385 | break; | 385 | break; |
386 | } | 386 | } |
387 | yes = 0; | 387 | yes = 0; |
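Editor's note: the bytecode check above recognises an IPv4-mapped address (first two 32-bit words zero, third equal to htonl(0xffff)) and then runs a prefix comparison on the remaining 32 bits. A sketch of that pair of checks on plain arrays, assuming network-byte-order addresses and a 0..32 prefix length:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Does addr fall inside prefix/prefix_len?  Both are IPv4 addresses in
 * network byte order; prefix_len is 0..32. */
static int prefix_match(uint32_t addr, uint32_t prefix, int prefix_len)
{
    uint32_t mask;

    if (prefix_len == 0)
        return 1;
    mask = htonl(~0u << (32 - prefix_len));
    return (addr & mask) == (prefix & mask);
}

/* Is this 128-bit address an IPv4-mapped one (::ffff:a.b.c.d)? */
static int is_v4_mapped(const uint32_t addr[4])
{
    return addr[0] == 0 && addr[1] == 0 && addr[2] == htonl(0xffff);
}

int main(void)
{
    uint32_t v6[4] = { 0, 0, htonl(0xffff), inet_addr("192.168.1.7") };

    if (is_v4_mapped(v6))
        printf("match /24: %d\n",
               prefix_match(v6[3], inet_addr("192.168.1.0"), 24));
    return 0;
}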
@@ -518,7 +518,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, | |||
518 | } | 518 | } |
519 | entry.sport = tw->tw_num; | 519 | entry.sport = tw->tw_num; |
520 | entry.dport = ntohs(tw->tw_dport); | 520 | entry.dport = ntohs(tw->tw_dport); |
521 | entry.userlocks = 0; | 521 | entry.userlocks = 0; |
522 | 522 | ||
523 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) | 523 | if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) |
524 | return 0; | 524 | return 0; |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 150ace18dc75..fb662621c54e 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -262,7 +262,7 @@ not_unique: | |||
262 | static inline u32 inet_sk_port_offset(const struct sock *sk) | 262 | static inline u32 inet_sk_port_offset(const struct sock *sk) |
263 | { | 263 | { |
264 | const struct inet_sock *inet = inet_sk(sk); | 264 | const struct inet_sock *inet = inet_sk(sk); |
265 | return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr, | 265 | return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr, |
266 | inet->dport); | 266 | inet->dport); |
267 | } | 267 | } |
268 | 268 | ||
@@ -274,81 +274,81 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row, | |||
274 | { | 274 | { |
275 | struct inet_hashinfo *hinfo = death_row->hashinfo; | 275 | struct inet_hashinfo *hinfo = death_row->hashinfo; |
276 | const unsigned short snum = inet_sk(sk)->num; | 276 | const unsigned short snum = inet_sk(sk)->num; |
277 | struct inet_bind_hashbucket *head; | 277 | struct inet_bind_hashbucket *head; |
278 | struct inet_bind_bucket *tb; | 278 | struct inet_bind_bucket *tb; |
279 | int ret; | 279 | int ret; |
280 | 280 | ||
281 | if (!snum) { | 281 | if (!snum) { |
282 | int low = sysctl_local_port_range[0]; | 282 | int low = sysctl_local_port_range[0]; |
283 | int high = sysctl_local_port_range[1]; | 283 | int high = sysctl_local_port_range[1]; |
284 | int range = high - low; | 284 | int range = high - low; |
285 | int i; | 285 | int i; |
286 | int port; | 286 | int port; |
287 | static u32 hint; | 287 | static u32 hint; |
288 | u32 offset = hint + inet_sk_port_offset(sk); | 288 | u32 offset = hint + inet_sk_port_offset(sk); |
289 | struct hlist_node *node; | 289 | struct hlist_node *node; |
290 | struct inet_timewait_sock *tw = NULL; | 290 | struct inet_timewait_sock *tw = NULL; |
291 | 291 | ||
292 | local_bh_disable(); | 292 | local_bh_disable(); |
293 | for (i = 1; i <= range; i++) { | 293 | for (i = 1; i <= range; i++) { |
294 | port = low + (i + offset) % range; | 294 | port = low + (i + offset) % range; |
295 | head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; | 295 | head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; |
296 | spin_lock(&head->lock); | 296 | spin_lock(&head->lock); |
297 | 297 | ||
298 | /* Does not bother with rcv_saddr checks, | 298 | /* Does not bother with rcv_saddr checks, |
299 | * because the established check is already | 299 | * because the established check is already |
300 | * unique enough. | 300 | * unique enough. |
301 | */ | 301 | */ |
302 | inet_bind_bucket_for_each(tb, node, &head->chain) { | 302 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
303 | if (tb->port == port) { | 303 | if (tb->port == port) { |
304 | BUG_TRAP(!hlist_empty(&tb->owners)); | 304 | BUG_TRAP(!hlist_empty(&tb->owners)); |
305 | if (tb->fastreuse >= 0) | 305 | if (tb->fastreuse >= 0) |
306 | goto next_port; | 306 | goto next_port; |
307 | if (!__inet_check_established(death_row, | 307 | if (!__inet_check_established(death_row, |
308 | sk, port, | 308 | sk, port, |
309 | &tw)) | 309 | &tw)) |
310 | goto ok; | 310 | goto ok; |
311 | goto next_port; | 311 | goto next_port; |
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port); | 315 | tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port); |
316 | if (!tb) { | 316 | if (!tb) { |
317 | spin_unlock(&head->lock); | 317 | spin_unlock(&head->lock); |
318 | break; | 318 | break; |
319 | } | 319 | } |
320 | tb->fastreuse = -1; | 320 | tb->fastreuse = -1; |
321 | goto ok; | 321 | goto ok; |
322 | 322 | ||
323 | next_port: | 323 | next_port: |
324 | spin_unlock(&head->lock); | 324 | spin_unlock(&head->lock); |
325 | } | 325 | } |
326 | local_bh_enable(); | 326 | local_bh_enable(); |
327 | 327 | ||
328 | return -EADDRNOTAVAIL; | 328 | return -EADDRNOTAVAIL; |
329 | 329 | ||
330 | ok: | 330 | ok: |
331 | hint += i; | 331 | hint += i; |
332 | 332 | ||
333 | /* Head lock still held and bh's disabled */ | 333 | /* Head lock still held and bh's disabled */ |
334 | inet_bind_hash(sk, tb, port); | 334 | inet_bind_hash(sk, tb, port); |
335 | if (sk_unhashed(sk)) { | 335 | if (sk_unhashed(sk)) { |
336 | inet_sk(sk)->sport = htons(port); | 336 | inet_sk(sk)->sport = htons(port); |
337 | __inet_hash(hinfo, sk, 0); | 337 | __inet_hash(hinfo, sk, 0); |
338 | } | 338 | } |
339 | spin_unlock(&head->lock); | 339 | spin_unlock(&head->lock); |
340 | 340 | ||
341 | if (tw) { | 341 | if (tw) { |
342 | inet_twsk_deschedule(tw, death_row); | 342 | inet_twsk_deschedule(tw, death_row); |
343 | inet_twsk_put(tw); | 343 | inet_twsk_put(tw); |
344 | } | 344 | } |
345 | 345 | ||
346 | ret = 0; | 346 | ret = 0; |
347 | goto out; | 347 | goto out; |
348 | } | 348 | } |
349 | 349 | ||
350 | head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; | 350 | head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; |
351 | tb = inet_csk(sk)->icsk_bind_hash; | 351 | tb = inet_csk(sk)->icsk_bind_hash; |
352 | spin_lock_bh(&head->lock); | 352 | spin_lock_bh(&head->lock); |
353 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 353 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
354 | __inet_hash(hinfo, sk, 0); | 354 | __inet_hash(hinfo, sk, 0); |
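Editor's note: inet_hash_connect() above, when the socket has no local port yet, scans the local port range starting at an offset derived from a keyed hash of the connection plus a rolling hint, and takes the first port whose bind bucket is absent or passes the established-uniqueness check. A userspace sketch of just that search order; the port_in_use() stub below stands in for the bucket and timewait logic.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the bind-bucket / established check: pretend a handful of
 * ports are already taken. */
static bool port_in_use(int port)
{
    return port == 32770 || port == 32771 || port == 32772;
}

/* Walk the range starting at an offset derived from the connection, so
 * different destinations begin their search at different ports. */
static int pick_ephemeral_port(int low, int high, uint32_t offset)
{
    int range = high - low;

    for (int i = 1; i <= range; i++) {
        int port = low + (int)(((uint32_t)i + offset) % (uint32_t)range);
        if (!port_in_use(port))
            return port;
    }
    return -1;   /* -EADDRNOTAVAIL in the kernel */
}

int main(void)
{
    /* 32768..61000 was the traditional default local port range. */
    printf("picked %d\n", pick_ephemeral_port(32768, 61000, 0));
    printf("picked %d\n", pick_ephemeral_port(32768, 61000, 12345));
    return 0;
}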
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index a22d11d2911c..c3ea0cd2e584 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -4,15 +4,15 @@ | |||
4 | * interface as the means of communication with the user level. | 4 | * interface as the means of communication with the user level. |
5 | * | 5 | * |
6 | * The IP forwarding functionality. | 6 | * The IP forwarding functionality. |
7 | * | 7 | * |
8 | * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $ | 8 | * Version: $Id: ip_forward.c,v 1.48 2000/12/13 18:31:48 davem Exp $ |
9 | * | 9 | * |
10 | * Authors: see ip.c | 10 | * Authors: see ip.c |
11 | * | 11 | * |
12 | * Fixes: | 12 | * Fixes: |
13 | * Many : Split from ip.c , see ip_input.c for | 13 | * Many : Split from ip.c , see ip_input.c for |
14 | * history. | 14 | * history. |
15 | * Dave Gregorich : NULL ip_rt_put fix for multicast | 15 | * Dave Gregorich : NULL ip_rt_put fix for multicast |
16 | * routing. | 16 | * routing. |
17 | * Jos Vos : Add call_out_firewall before sending, | 17 | * Jos Vos : Add call_out_firewall before sending, |
18 | * use output device for accounting. | 18 | * use output device for accounting. |
@@ -69,14 +69,14 @@ int ip_forward(struct sk_buff *skb) | |||
69 | goto drop; | 69 | goto drop; |
70 | 70 | ||
71 | skb->ip_summed = CHECKSUM_NONE; | 71 | skb->ip_summed = CHECKSUM_NONE; |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * According to the RFC, we must first decrease the TTL field. If | 74 | * According to the RFC, we must first decrease the TTL field. If |
75 | * that reaches zero, we must reply an ICMP control message telling | 75 | * that reaches zero, we must reply an ICMP control message telling |
76 | * that the packet's lifetime expired. | 76 | * that the packet's lifetime expired. |
77 | */ | 77 | */ |
78 | if (skb->nh.iph->ttl <= 1) | 78 | if (skb->nh.iph->ttl <= 1) |
79 | goto too_many_hops; | 79 | goto too_many_hops; |
80 | 80 | ||
81 | if (!xfrm4_route_forward(skb)) | 81 | if (!xfrm4_route_forward(skb)) |
82 | goto drop; | 82 | goto drop; |
@@ -107,16 +107,16 @@ int ip_forward(struct sk_buff *skb) | |||
107 | ip_forward_finish); | 107 | ip_forward_finish); |
108 | 108 | ||
109 | sr_failed: | 109 | sr_failed: |
110 | /* | 110 | /* |
111 | * Strict routing permits no gatewaying | 111 | * Strict routing permits no gatewaying |
112 | */ | 112 | */ |
113 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0); | 113 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0); |
114 | goto drop; | 114 | goto drop; |
115 | 115 | ||
116 | too_many_hops: | 116 | too_many_hops: |
117 | /* Tell the sender its packet died... */ | 117 | /* Tell the sender its packet died... */ |
118 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 118 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); |
119 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); | 119 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); |
120 | drop: | 120 | drop: |
121 | kfree_skb(skb); | 121 | kfree_skb(skb); |
122 | return NET_RX_DROP; | 122 | return NET_RX_DROP; |
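Editor's note: ip_forward() above refuses to forward a packet whose TTL is already 1 or less, bumping IPSTATS_MIB_INHDRERRORS and answering with ICMP time-exceeded; otherwise the TTL is decremented further down and the packet goes to ip_forward_finish. A compact sketch of that decision (the ICMP send is just a printout here):

#include <stdio.h>

enum fwd_verdict { FORWARD, DROP_TIME_EXCEEDED };

/* Decide what to do with a packet arriving with the given TTL.  The
 * caller decrements the TTL only when the verdict is FORWARD. */
static enum fwd_verdict forward_check(unsigned char ttl)
{
    if (ttl <= 1)
        return DROP_TIME_EXCEEDED;   /* kernel sends ICMP_TIME_EXCEEDED / ICMP_EXC_TTL */
    return FORWARD;
}

int main(void)
{
    unsigned char ttl = 2;

    for (int hop = 1; hop <= 3; hop++) {
        if (forward_check(ttl) == FORWARD) {
            ttl--;
            printf("hop %d: forwarded, ttl now %u\n", hop, ttl);
        } else {
            printf("hop %d: ttl expired, send time-exceeded\n", hop);
            break;
        }
    }
    return 0;
}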
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 8ce00d3703da..b6f055380373 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * interface as the means of communication with the user level. | 4 | * interface as the means of communication with the user level. |
5 | * | 5 | * |
6 | * The IP fragmentation functionality. | 6 | * The IP fragmentation functionality. |
7 | * | 7 | * |
8 | * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $ | 8 | * Version: $Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $ |
9 | * | 9 | * |
10 | * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> | 10 | * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> |
@@ -238,7 +238,7 @@ static void ipq_kill(struct ipq *ipq) | |||
238 | } | 238 | } |
239 | } | 239 | } |
240 | 240 | ||
241 | /* Memory limiting on fragments. Evictor trashes the oldest | 241 | /* Memory limiting on fragments. Evictor trashes the oldest |
242 | * fragment queue until we are back under the threshold. | 242 | * fragment queue until we are back under the threshold. |
243 | */ | 243 | */ |
244 | static void ip_evictor(void) | 244 | static void ip_evictor(void) |
@@ -479,14 +479,14 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
479 | goto err; | 479 | goto err; |
480 | } | 480 | } |
481 | 481 | ||
482 | offset = ntohs(skb->nh.iph->frag_off); | 482 | offset = ntohs(skb->nh.iph->frag_off); |
483 | flags = offset & ~IP_OFFSET; | 483 | flags = offset & ~IP_OFFSET; |
484 | offset &= IP_OFFSET; | 484 | offset &= IP_OFFSET; |
485 | offset <<= 3; /* offset is in 8-byte chunks */ | 485 | offset <<= 3; /* offset is in 8-byte chunks */ |
486 | ihl = skb->nh.iph->ihl * 4; | 486 | ihl = skb->nh.iph->ihl * 4; |
487 | 487 | ||
488 | /* Determine the position of this fragment. */ | 488 | /* Determine the position of this fragment. */ |
489 | end = offset + skb->len - ihl; | 489 | end = offset + skb->len - ihl; |
490 | 490 | ||
491 | /* Is this the final fragment? */ | 491 | /* Is this the final fragment? */ |
492 | if ((flags & IP_MF) == 0) { | 492 | if ((flags & IP_MF) == 0) { |
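Editor's note: ip_frag_queue() above splits the 16-bit frag_off field into the flag bits and the 13-bit offset, shifts the offset left by 3 because fragments sit on 8-byte boundaries, and computes end as the byte just past this fragment's payload. The same decode, standalone:

#include <stdio.h>
#include <stdint.h>

#define IP_OFFSET 0x1FFF   /* low 13 bits: fragment offset in 8-byte units */
#define IP_MF     0x2000   /* "more fragments" flag */

int main(void)
{
    /* frag_off already in host byte order, as after ntohs() in the kernel. */
    uint16_t frag_off = IP_MF | 185;   /* more fragments, offset 185 * 8 bytes */
    unsigned int payload_len = 1480;   /* skb->len - ihl in the kernel code */

    unsigned int flags  = frag_off & ~IP_OFFSET;
    unsigned int offset = (frag_off & IP_OFFSET) << 3;
    unsigned int end    = offset + payload_len;

    printf("offset=%u end=%u last=%s\n",
           offset, end, (flags & IP_MF) ? "no" : "yes");
    return 0;
}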
@@ -589,8 +589,8 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
589 | else | 589 | else |
590 | qp->fragments = skb; | 590 | qp->fragments = skb; |
591 | 591 | ||
592 | if (skb->dev) | 592 | if (skb->dev) |
593 | qp->iif = skb->dev->ifindex; | 593 | qp->iif = skb->dev->ifindex; |
594 | skb->dev = NULL; | 594 | skb->dev = NULL; |
595 | skb_get_timestamp(skb, &qp->stamp); | 595 | skb_get_timestamp(skb, &qp->stamp); |
596 | qp->meat += skb->len; | 596 | qp->meat += skb->len; |
@@ -684,7 +684,7 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev) | |||
684 | return head; | 684 | return head; |
685 | 685 | ||
686 | out_nomem: | 686 | out_nomem: |
687 | LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " | 687 | LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " |
688 | "queue %p\n", qp); | 688 | "queue %p\n", qp); |
689 | goto out_fail; | 689 | goto out_fail; |
690 | out_oversize: | 690 | out_oversize: |
@@ -703,7 +703,7 @@ struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user) | |||
703 | struct iphdr *iph = skb->nh.iph; | 703 | struct iphdr *iph = skb->nh.iph; |
704 | struct ipq *qp; | 704 | struct ipq *qp; |
705 | struct net_device *dev; | 705 | struct net_device *dev; |
706 | 706 | ||
707 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); | 707 | IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS); |
708 | 708 | ||
709 | /* Start by cleaning up the memory. */ | 709 | /* Start by cleaning up the memory. */ |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 51c83500790f..f12c0d6623a0 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux NET3: GRE over IP protocol decoder. | 2 | * Linux NET3: GRE over IP protocol decoder. |
3 | * | 3 | * |
4 | * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru) | 4 | * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru) |
5 | * | 5 | * |
@@ -63,7 +63,7 @@ | |||
63 | solution, but it supposes maintaing new variable in ALL | 63 | solution, but it supposes maintaing new variable in ALL |
64 | skb, even if no tunneling is used. | 64 | skb, even if no tunneling is used. |
65 | 65 | ||
66 | Current solution: t->recursion lock breaks dead loops. It looks | 66 | Current solution: t->recursion lock breaks dead loops. It looks |
67 | like dev->tbusy flag, but I preferred new variable, because | 67 | like dev->tbusy flag, but I preferred new variable, because |
68 | the semantics is different. One day, when hard_start_xmit | 68 | the semantics is different. One day, when hard_start_xmit |
69 | will be multithreaded we will have to use skb->encapsulation. | 69 | will be multithreaded we will have to use skb->encapsulation. |
@@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
613 | if (flags == 0 && | 613 | if (flags == 0 && |
614 | skb->protocol == htons(ETH_P_WCCP)) { | 614 | skb->protocol == htons(ETH_P_WCCP)) { |
615 | skb->protocol = htons(ETH_P_IP); | 615 | skb->protocol = htons(ETH_P_IP); |
616 | if ((*(h + offset) & 0xF0) != 0x40) | 616 | if ((*(h + offset) & 0xF0) != 0x40) |
617 | offset += 4; | 617 | offset += 4; |
618 | } | 618 | } |
619 | 619 | ||
@@ -816,7 +816,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
816 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 816 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
817 | if (!new_skb) { | 817 | if (!new_skb) { |
818 | ip_rt_put(rt); | 818 | ip_rt_put(rt); |
819 | stats->tx_dropped++; | 819 | stats->tx_dropped++; |
820 | dev_kfree_skb(skb); | 820 | dev_kfree_skb(skb); |
821 | tunnel->recursion--; | 821 | tunnel->recursion--; |
822 | return 0; | 822 | return 0; |
@@ -1044,7 +1044,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) | |||
1044 | so that I had to set ARPHRD_IPGRE to a random value. | 1044 | so that I had to set ARPHRD_IPGRE to a random value. |
1045 | I have an impression, that Cisco could make something similar, | 1045 | I have an impression, that Cisco could make something similar, |
1046 | but this feature is apparently missing in IOS<=11.2(8). | 1046 | but this feature is apparently missing in IOS<=11.2(8). |
1047 | 1047 | ||
1048 | I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks | 1048 | I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks |
1049 | with broadcast 224.66.66.66. If you have access to mbone, play with me :-) | 1049 | with broadcast 224.66.66.66. If you have access to mbone, play with me :-) |
1050 | 1050 | ||
@@ -1076,9 +1076,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh | |||
1076 | p[1] = htons(type); | 1076 | p[1] = htons(type); |
1077 | 1077 | ||
1078 | /* | 1078 | /* |
1079 | * Set the source hardware address. | 1079 | * Set the source hardware address. |
1080 | */ | 1080 | */ |
1081 | 1081 | ||
1082 | if (saddr) | 1082 | if (saddr) |
1083 | memcpy(&iph->saddr, saddr, 4); | 1083 | memcpy(&iph->saddr, saddr, 4); |
1084 | 1084 | ||
@@ -1088,7 +1088,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh | |||
1088 | } | 1088 | } |
1089 | if (iph->daddr && !MULTICAST(iph->daddr)) | 1089 | if (iph->daddr && !MULTICAST(iph->daddr)) |
1090 | return t->hlen; | 1090 | return t->hlen; |
1091 | 1091 | ||
1092 | return -t->hlen; | 1092 | return -t->hlen; |
1093 | } | 1093 | } |
1094 | 1094 | ||
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 212734ca238f..f38e97647ac0 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -15,7 +15,7 @@ | |||
15 | * Stefan Becker, <stefanb@yello.ping.de> | 15 | * Stefan Becker, <stefanb@yello.ping.de> |
16 | * Jorge Cwik, <jorge@laser.satlink.net> | 16 | * Jorge Cwik, <jorge@laser.satlink.net> |
17 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> | 17 | * Arnt Gulbrandsen, <agulbra@nvg.unit.no> |
18 | * | 18 | * |
19 | * | 19 | * |
20 | * Fixes: | 20 | * Fixes: |
21 | * Alan Cox : Commented a couple of minor bits of surplus code | 21 | * Alan Cox : Commented a couple of minor bits of surplus code |
@@ -98,13 +98,13 @@ | |||
98 | * Jos Vos : Do accounting *before* call_in_firewall | 98 | * Jos Vos : Do accounting *before* call_in_firewall |
99 | * Willy Konynenberg : Transparent proxying support | 99 | * Willy Konynenberg : Transparent proxying support |
100 | * | 100 | * |
101 | * | 101 | * |
102 | * | 102 | * |
103 | * To Fix: | 103 | * To Fix: |
104 | * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient | 104 | * IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient |
105 | * and could be made very efficient with the addition of some virtual memory hacks to permit | 105 | * and could be made very efficient with the addition of some virtual memory hacks to permit |
106 | * the allocation of a buffer that can then be 'grown' by twiddling page tables. | 106 | * the allocation of a buffer that can then be 'grown' by twiddling page tables. |
107 | * Output fragmentation wants updating along with the buffer management to use a single | 107 | * Output fragmentation wants updating along with the buffer management to use a single |
108 | * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet | 108 | * interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet |
109 | * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause | 109 | * output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause |
110 | * fragmentation anyway. | 110 | * fragmentation anyway. |
@@ -154,7 +154,7 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly; | |||
154 | 154 | ||
155 | /* | 155 | /* |
156 | * Process Router Attention IP option | 156 | * Process Router Attention IP option |
157 | */ | 157 | */ |
158 | int ip_call_ra_chain(struct sk_buff *skb) | 158 | int ip_call_ra_chain(struct sk_buff *skb) |
159 | { | 159 | { |
160 | struct ip_ra_chain *ra; | 160 | struct ip_ra_chain *ra; |
@@ -202,8 +202,8 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) | |||
202 | 202 | ||
203 | __skb_pull(skb, ihl); | 203 | __skb_pull(skb, ihl); |
204 | 204 | ||
205 | /* Point into the IP datagram, just past the header. */ | 205 | /* Point into the IP datagram, just past the header. */ |
206 | skb->h.raw = skb->data; | 206 | skb->h.raw = skb->data; |
207 | 207 | ||
208 | rcu_read_lock(); | 208 | rcu_read_lock(); |
209 | { | 209 | { |
@@ -259,7 +259,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb) | |||
259 | 259 | ||
260 | /* | 260 | /* |
261 | * Deliver IP Packets to the higher protocol layers. | 261 | * Deliver IP Packets to the higher protocol layers. |
262 | */ | 262 | */ |
263 | int ip_local_deliver(struct sk_buff *skb) | 263 | int ip_local_deliver(struct sk_buff *skb) |
264 | { | 264 | { |
265 | /* | 265 | /* |
@@ -335,14 +335,14 @@ static inline int ip_rcv_finish(struct sk_buff *skb) | |||
335 | /* | 335 | /* |
336 | * Initialise the virtual path cache for the packet. It describes | 336 | * Initialise the virtual path cache for the packet. It describes |
337 | * how the packet travels inside Linux networking. | 337 | * how the packet travels inside Linux networking. |
338 | */ | 338 | */ |
339 | if (skb->dst == NULL) { | 339 | if (skb->dst == NULL) { |
340 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 340 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
341 | skb->dev); | 341 | skb->dev); |
342 | if (unlikely(err)) { | 342 | if (unlikely(err)) { |
343 | if (err == -EHOSTUNREACH) | 343 | if (err == -EHOSTUNREACH) |
344 | IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); | 344 | IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); |
345 | goto drop; | 345 | goto drop; |
346 | } | 346 | } |
347 | } | 347 | } |
348 | 348 | ||
@@ -363,13 +363,13 @@ static inline int ip_rcv_finish(struct sk_buff *skb) | |||
363 | return dst_input(skb); | 363 | return dst_input(skb); |
364 | 364 | ||
365 | drop: | 365 | drop: |
366 | kfree_skb(skb); | 366 | kfree_skb(skb); |
367 | return NET_RX_DROP; | 367 | return NET_RX_DROP; |
368 | } | 368 | } |
369 | 369 | ||
370 | /* | 370 | /* |
371 | * Main IP Receive routine. | 371 | * Main IP Receive routine. |
372 | */ | 372 | */ |
373 | int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 373 | int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) |
374 | { | 374 | { |
375 | struct iphdr *iph; | 375 | struct iphdr *iph; |
@@ -437,9 +437,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, | |||
437 | inhdr_error: | 437 | inhdr_error: |
438 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); | 438 | IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); |
439 | drop: | 439 | drop: |
440 | kfree_skb(skb); | 440 | kfree_skb(skb); |
441 | out: | 441 | out: |
442 | return NET_RX_DROP; | 442 | return NET_RX_DROP; |
443 | } | 443 | } |
444 | 444 | ||
445 | EXPORT_SYMBOL(ip_statistics); | 445 | EXPORT_SYMBOL(ip_statistics); |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 9f02917d6f45..f906a80d5a87 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $ | 8 | * Version: $Id: ip_options.c,v 1.21 2001/09/01 00:31:50 davem Exp $ |
9 | * | 9 | * |
10 | * Authors: A.N.Kuznetsov | 10 | * Authors: A.N.Kuznetsov |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/capability.h> | 14 | #include <linux/capability.h> |
@@ -26,7 +26,7 @@ | |||
26 | #include <net/route.h> | 26 | #include <net/route.h> |
27 | #include <net/cipso_ipv4.h> | 27 | #include <net/cipso_ipv4.h> |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Write options to IP header, record destination address to | 30 | * Write options to IP header, record destination address to |
31 | * source route option, address of outgoing interface | 31 | * source route option, address of outgoing interface |
32 | * (we should already know it, so that this function is allowed be | 32 | * (we should already know it, so that this function is allowed be |
@@ -76,7 +76,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt, | |||
76 | } | 76 | } |
77 | } | 77 | } |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Provided (sopt, skb) points to received options, | 80 | * Provided (sopt, skb) points to received options, |
81 | * build in dopt compiled option set appropriate for answering. | 81 | * build in dopt compiled option set appropriate for answering. |
82 | * i.e. invert SRR option, copy anothers, | 82 | * i.e. invert SRR option, copy anothers, |
@@ -85,7 +85,7 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt, | |||
85 | * NOTE: dopt cannot point to skb. | 85 | * NOTE: dopt cannot point to skb. |
86 | */ | 86 | */ |
87 | 87 | ||
88 | int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | 88 | int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) |
89 | { | 89 | { |
90 | struct ip_options *sopt; | 90 | struct ip_options *sopt; |
91 | unsigned char *sptr, *dptr; | 91 | unsigned char *sptr, *dptr; |
@@ -215,7 +215,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
215 | * Simple and stupid 8), but the most efficient way. | 215 | * Simple and stupid 8), but the most efficient way. |
216 | */ | 216 | */ |
217 | 217 | ||
218 | void ip_options_fragment(struct sk_buff * skb) | 218 | void ip_options_fragment(struct sk_buff * skb) |
219 | { | 219 | { |
220 | unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr); | 220 | unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr); |
221 | struct ip_options * opt = &(IPCB(skb)->opt); | 221 | struct ip_options * opt = &(IPCB(skb)->opt); |
@@ -370,7 +370,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) | |||
370 | switch (optptr[3]&0xF) { | 370 | switch (optptr[3]&0xF) { |
371 | case IPOPT_TS_TSONLY: | 371 | case IPOPT_TS_TSONLY: |
372 | opt->ts = optptr - iph; | 372 | opt->ts = optptr - iph; |
373 | if (skb) | 373 | if (skb) |
374 | timeptr = (__be32*)&optptr[optptr[2]-1]; | 374 | timeptr = (__be32*)&optptr[optptr[2]-1]; |
375 | opt->ts_needtime = 1; | 375 | opt->ts_needtime = 1; |
376 | optptr[2] += 4; | 376 | optptr[2] += 4; |
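Editor's note: the IPOPT_TS_TSONLY case above records the timestamp option's position, remembers where the next 32-bit timestamp slot lives via the option's pointer byte (optptr[2], 1-based), and advances that pointer by 4 once a slot is consumed. A userspace sketch of recording timestamps into such an option; the full/overflow handling is simplified relative to ip_options_compile().

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Record one 32-bit timestamp in an IP timestamp option (flag 0,
 * "timestamps only").  opt[0]=type, opt[1]=length, opt[2]=pointer
 * (1-based octet index of the next free slot), opt[3]=overflow/flags. */
static int ts_option_record(uint8_t *opt, uint32_t ms_since_midnight)
{
    uint8_t len = opt[1];
    uint8_t ptr = opt[2];
    uint32_t be = htonl(ms_since_midnight);

    if (ptr + 3 > len)
        return -1;                       /* option already full */
    memcpy(opt + ptr - 1, &be, 4);       /* pointer is 1-based */
    opt[2] = ptr + 4;                    /* advance to the next slot */
    return 0;
}

int main(void)
{
    /* type 68, total length 12: room for exactly two timestamps. */
    uint8_t opt[12] = { 68, 12, 5, 0 };

    printf("%d\n", ts_option_record(opt, 1000));   /* 0 */
    printf("%d\n", ts_option_record(opt, 2000));   /* 0 */
    printf("%d\n", ts_option_record(opt, 3000));   /* -1, full */
    printf("pointer now %u\n", opt[2]);            /* 13 */
    return 0;
}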
@@ -448,7 +448,7 @@ int ip_options_compile(struct ip_options * opt, struct sk_buff * skb) | |||
448 | goto error; | 448 | goto error; |
449 | } | 449 | } |
450 | opt->cipso = optptr - iph; | 450 | opt->cipso = optptr - iph; |
451 | if (cipso_v4_validate(&optptr)) { | 451 | if (cipso_v4_validate(&optptr)) { |
452 | pp_ptr = optptr; | 452 | pp_ptr = optptr; |
453 | goto error; | 453 | goto error; |
454 | } | 454 | } |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index a0f2008584bc..bb0bb8f07c54 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -22,7 +22,7 @@ | |||
22 | * Fixes: | 22 | * Fixes: |
23 | * Alan Cox : Missing nonblock feature in ip_build_xmit. | 23 | * Alan Cox : Missing nonblock feature in ip_build_xmit. |
24 | * Mike Kilburn : htons() missing in ip_build_xmit. | 24 | * Mike Kilburn : htons() missing in ip_build_xmit. |
25 | * Bradford Johnson: Fix faulty handling of some frames when | 25 | * Bradford Johnson: Fix faulty handling of some frames when |
26 | * no route is found. | 26 | * no route is found. |
27 | * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit | 27 | * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit |
28 | * (in case if packet not accepted by | 28 | * (in case if packet not accepted by |
@@ -33,9 +33,9 @@ | |||
33 | * some redundant tests. | 33 | * some redundant tests. |
34 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. | 34 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. |
35 | * Andi Kleen : Replace ip_reply with ip_send_reply. | 35 | * Andi Kleen : Replace ip_reply with ip_send_reply. |
36 | * Andi Kleen : Split fast and slow ip_build_xmit path | 36 | * Andi Kleen : Split fast and slow ip_build_xmit path |
37 | * for decreased register pressure on x86 | 37 | * for decreased register pressure on x86 |
38 | * and more readibility. | 38 | * and more readibility. |
39 | * Marc Boucher : When call_out_firewall returns FW_QUEUE, | 39 | * Marc Boucher : When call_out_firewall returns FW_QUEUE, |
40 | * silently drop skb instead of failing with -EPERM. | 40 | * silently drop skb instead of failing with -EPERM. |
41 | * Detlev Wengorz : Copy protocol for fragments. | 41 | * Detlev Wengorz : Copy protocol for fragments. |
@@ -114,7 +114,7 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst) | |||
114 | return ttl; | 114 | return ttl; |
115 | } | 115 | } |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * Add an ip header to a skbuff and send it out. | 118 | * Add an ip header to a skbuff and send it out. |
119 | * | 119 | * |
120 | */ | 120 | */ |
@@ -243,7 +243,7 @@ int ip_mc_output(struct sk_buff *skb) | |||
243 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); | 243 | struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); |
244 | if (newskb) | 244 | if (newskb) |
245 | NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL, | 245 | NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL, |
246 | newskb->dev, | 246 | newskb->dev, |
247 | ip_dev_loopback_xmit); | 247 | ip_dev_loopback_xmit); |
248 | } | 248 | } |
249 | 249 | ||
@@ -277,7 +277,7 @@ int ip_output(struct sk_buff *skb) | |||
277 | skb->protocol = htons(ETH_P_IP); | 277 | skb->protocol = htons(ETH_P_IP); |
278 | 278 | ||
279 | return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, | 279 | return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev, |
280 | ip_finish_output, | 280 | ip_finish_output, |
281 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 281 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
282 | } | 282 | } |
283 | 283 | ||
@@ -660,7 +660,7 @@ slow_path: | |||
660 | return err; | 660 | return err; |
661 | 661 | ||
662 | fail: | 662 | fail: |
663 | kfree_skb(skb); | 663 | kfree_skb(skb); |
664 | IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); | 664 | IP_INC_STATS(IPSTATS_MIB_FRAGFAILS); |
665 | return err; | 665 | return err; |
666 | } | 666 | } |
@@ -755,7 +755,7 @@ static inline int ip_ufo_append_data(struct sock *sk, | |||
755 | * from many pieces of data. Each pieces will be holded on the socket | 755 | * from many pieces of data. Each pieces will be holded on the socket |
756 | * until ip_push_pending_frames() is called. Each piece can be a page | 756 | * until ip_push_pending_frames() is called. Each piece can be a page |
757 | * or non-page data. | 757 | * or non-page data. |
758 | * | 758 | * |
759 | * Not only UDP, other transport protocols - e.g. raw sockets - can use | 759 | * Not only UDP, other transport protocols - e.g. raw sockets - can use |
760 | * this interface potentially. | 760 | * this interface potentially. |
761 | * | 761 | * |
@@ -888,7 +888,7 @@ alloc_new_skb: | |||
888 | datalen = maxfraglen - fragheaderlen; | 888 | datalen = maxfraglen - fragheaderlen; |
889 | fraglen = datalen + fragheaderlen; | 889 | fraglen = datalen + fragheaderlen; |
890 | 890 | ||
891 | if ((flags & MSG_MORE) && | 891 | if ((flags & MSG_MORE) && |
892 | !(rt->u.dst.dev->features&NETIF_F_SG)) | 892 | !(rt->u.dst.dev->features&NETIF_F_SG)) |
893 | alloclen = mtu; | 893 | alloclen = mtu; |
894 | else | 894 | else |
@@ -903,14 +903,14 @@ alloc_new_skb: | |||
903 | alloclen += rt->u.dst.trailer_len; | 903 | alloclen += rt->u.dst.trailer_len; |
904 | 904 | ||
905 | if (transhdrlen) { | 905 | if (transhdrlen) { |
906 | skb = sock_alloc_send_skb(sk, | 906 | skb = sock_alloc_send_skb(sk, |
907 | alloclen + hh_len + 15, | 907 | alloclen + hh_len + 15, |
908 | (flags & MSG_DONTWAIT), &err); | 908 | (flags & MSG_DONTWAIT), &err); |
909 | } else { | 909 | } else { |
910 | skb = NULL; | 910 | skb = NULL; |
911 | if (atomic_read(&sk->sk_wmem_alloc) <= | 911 | if (atomic_read(&sk->sk_wmem_alloc) <= |
912 | 2 * sk->sk_sndbuf) | 912 | 2 * sk->sk_sndbuf) |
913 | skb = sock_wmalloc(sk, | 913 | skb = sock_wmalloc(sk, |
914 | alloclen + hh_len + 15, 1, | 914 | alloclen + hh_len + 15, 1, |
915 | sk->sk_allocation); | 915 | sk->sk_allocation); |
916 | if (unlikely(skb == NULL)) | 916 | if (unlikely(skb == NULL)) |
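Editor's note: the ip_append_data() hunk above sizes the skb for the next pending fragment: with MSG_MORE on a device without scatter-gather the buffer is allocated at full MTU so later appends can land in place, otherwise it is sized to the data plus the fragment header. The rounding of the per-fragment payload down to a multiple of 8 is how maxfraglen is computed elsewhere in this function and is assumed below; this is a rough sketch of the sizing, not the kernel code.

#include <stdio.h>
#include <stdbool.h>

/* How large to allocate the skb for the next fragment of pending data. */
static int fragment_alloc_len(int mtu, int iphdrlen, bool msg_more, bool dev_sg)
{
    int fragheaderlen = iphdrlen;
    int maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
    int datalen = maxfraglen - fragheaderlen;

    if (msg_more && !dev_sg)
        return mtu;                      /* leave room for later appends */
    return datalen + fragheaderlen;      /* exact fit for this fragment */
}

int main(void)
{
    printf("plain send: %d\n", fragment_alloc_len(1500, 20, false, false));
    printf("MSG_MORE:   %d\n", fragment_alloc_len(1500, 20, true,  false));
    printf("odd mtu:    %d\n", fragment_alloc_len(1006, 20, false, false));
    return 0;
}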
@@ -971,7 +971,7 @@ alloc_new_skb: | |||
971 | unsigned int off; | 971 | unsigned int off; |
972 | 972 | ||
973 | off = skb->len; | 973 | off = skb->len; |
974 | if (getfrag(from, skb_put(skb, copy), | 974 | if (getfrag(from, skb_put(skb, copy), |
975 | offset, copy, off, skb) < 0) { | 975 | offset, copy, off, skb) < 0) { |
976 | __skb_trim(skb, off); | 976 | __skb_trim(skb, off); |
977 | err = -EFAULT; | 977 | err = -EFAULT; |
@@ -993,7 +993,7 @@ alloc_new_skb: | |||
993 | goto error; | 993 | goto error; |
994 | } | 994 | } |
995 | get_page(page); | 995 | get_page(page); |
996 | skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0); | 996 | skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0); |
997 | frag = &skb_shinfo(skb)->frags[i]; | 997 | frag = &skb_shinfo(skb)->frags[i]; |
998 | } | 998 | } |
999 | } else if (i < MAX_SKB_FRAGS) { | 999 | } else if (i < MAX_SKB_FRAGS) { |
@@ -1033,7 +1033,7 @@ alloc_new_skb: | |||
1033 | error: | 1033 | error: |
1034 | inet->cork.length -= length; | 1034 | inet->cork.length -= length; |
1035 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 1035 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); |
1036 | return err; | 1036 | return err; |
1037 | } | 1037 | } |
1038 | 1038 | ||
1039 | ssize_t ip_append_page(struct sock *sk, struct page *page, | 1039 | ssize_t ip_append_page(struct sock *sk, struct page *page, |
@@ -1257,7 +1257,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1257 | skb->dst = dst_clone(&rt->u.dst); | 1257 | skb->dst = dst_clone(&rt->u.dst); |
1258 | 1258 | ||
1259 | /* Netfilter gets whole the not fragmented skb. */ | 1259 | /* Netfilter gets whole the not fragmented skb. */ |
1260 | err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, | 1260 | err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, |
1261 | skb->dst->dev, dst_output); | 1261 | skb->dst->dev, dst_output); |
1262 | if (err) { | 1262 | if (err) { |
1263 | if (err > 0) | 1263 | if (err > 0) |
@@ -1305,21 +1305,21 @@ void ip_flush_pending_frames(struct sock *sk) | |||
1305 | /* | 1305 | /* |
1306 | * Fetch data from kernel space and fill in checksum if needed. | 1306 | * Fetch data from kernel space and fill in checksum if needed. |
1307 | */ | 1307 | */ |
1308 | static int ip_reply_glue_bits(void *dptr, char *to, int offset, | 1308 | static int ip_reply_glue_bits(void *dptr, char *to, int offset, |
1309 | int len, int odd, struct sk_buff *skb) | 1309 | int len, int odd, struct sk_buff *skb) |
1310 | { | 1310 | { |
1311 | __wsum csum; | 1311 | __wsum csum; |
1312 | 1312 | ||
1313 | csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0); | 1313 | csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0); |
1314 | skb->csum = csum_block_add(skb->csum, csum, odd); | 1314 | skb->csum = csum_block_add(skb->csum, csum, odd); |
1315 | return 0; | 1315 | return 0; |
1316 | } | 1316 | } |
1317 | 1317 | ||
1318 | /* | 1318 | /* |
1319 | * Generic function to send a packet as reply to another packet. | 1319 | * Generic function to send a packet as reply to another packet. |
1320 | * Used to send TCP resets so far. ICMP should use this function too. | 1320 | * Used to send TCP resets so far. ICMP should use this function too. |
1321 | * | 1321 | * |
1322 | * Should run single threaded per socket because it uses the sock | 1322 | * Should run single threaded per socket because it uses the sock |
1323 | * structure to pass arguments. | 1323 | * structure to pass arguments. |
1324 | * | 1324 | * |
1325 | * LATER: switch from ip_build_xmit to ip_append_* | 1325 | * LATER: switch from ip_build_xmit to ip_append_* |
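Editor's note: ip_reply_glue_bits() above copies reply data into the skb while folding a partial one's-complement sum into skb->csum via csum_block_add(), so the transport checksum can be finalised once all pieces are queued. A standalone sketch of the underlying RFC 1071 sum, showing that summing a buffer in two chunks split at an even offset matches summing it in one pass (csum_block_add() also copes with odd offsets, which is omitted here):

#include <stdio.h>
#include <stdint.h>

/* One's-complement sum over a buffer, continuing from a previous
 * partial sum.  Each chunk must start at an even offset. */
static uint32_t csum_partial(const uint8_t *buf, size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (i < len)                       /* odd trailing byte */
        sum += (uint32_t)buf[i] << 8;
    while (sum >> 16)                  /* fold end-around carries */
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t data[20];

    for (int i = 0; i < 20; i++)
        data[i] = (uint8_t)(i * 7 + 3);

    uint32_t whole = csum_partial(data, 20, 0);
    uint32_t split = csum_partial(data + 8, 12, csum_partial(data, 8, 0));

    printf("whole=0x%04x split=0x%04x fold=0x%04x\n",
           (unsigned)whole, (unsigned)split, (unsigned)csum_fold(whole));
    return 0;
}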
@@ -1357,7 +1357,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar | |||
1357 | /* Not quite clean, but right. */ | 1357 | /* Not quite clean, but right. */ |
1358 | .uli_u = { .ports = | 1358 | .uli_u = { .ports = |
1359 | { .sport = skb->h.th->dest, | 1359 | { .sport = skb->h.th->dest, |
1360 | .dport = skb->h.th->source } }, | 1360 | .dport = skb->h.th->source } }, |
1361 | .proto = sk->sk_protocol }; | 1361 | .proto = sk->sk_protocol }; |
1362 | security_skb_classify_flow(skb, &fl); | 1362 | security_skb_classify_flow(skb, &fl); |
1363 | if (ip_route_output_key(&rt, &fl)) | 1363 | if (ip_route_output_key(&rt, &fl)) |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 57d4bae6f080..e120686c3cb8 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * interface as the means of communication with the user level. | 4 | * interface as the means of communication with the user level. |
5 | * | 5 | * |
6 | * The IP to API glue. | 6 | * The IP to API glue. |
7 | * | 7 | * |
8 | * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $ | 8 | * Version: $Id: ip_sockglue.c,v 1.62 2002/02/01 22:01:04 davem Exp $ |
9 | * | 9 | * |
10 | * Authors: see ip.c | 10 | * Authors: see ip.c |
@@ -12,7 +12,7 @@ | |||
12 | * Fixes: | 12 | * Fixes: |
13 | * Many : Split from ip.c , see ip.c for history. | 13 | * Many : Split from ip.c , see ip.c for history. |
14 | * Martin Mares : TOS setting fixed. | 14 | * Martin Mares : TOS setting fixed. |
15 | * Alan Cox : Fixed a couple of oopses in Martin's | 15 | * Alan Cox : Fixed a couple of oopses in Martin's |
16 | * TOS tweaks. | 16 | * TOS tweaks. |
17 | * Mike McLagan : Routing by source | 17 | * Mike McLagan : Routing by source |
18 | */ | 18 | */ |
@@ -253,7 +253,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s | |||
253 | return 0; | 253 | return 0; |
254 | } | 254 | } |
255 | 255 | ||
256 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | 256 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, |
257 | __be16 port, u32 info, u8 *payload) | 257 | __be16 port, u32 info, u8 *payload) |
258 | { | 258 | { |
259 | struct inet_sock *inet = inet_sk(sk); | 259 | struct inet_sock *inet = inet_sk(sk); |
@@ -266,10 +266,10 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, | |||
266 | if (!skb) | 266 | if (!skb) |
267 | return; | 267 | return; |
268 | 268 | ||
269 | serr = SKB_EXT_ERR(skb); | 269 | serr = SKB_EXT_ERR(skb); |
270 | serr->ee.ee_errno = err; | 270 | serr->ee.ee_errno = err; |
271 | serr->ee.ee_origin = SO_EE_ORIGIN_ICMP; | 271 | serr->ee.ee_origin = SO_EE_ORIGIN_ICMP; |
272 | serr->ee.ee_type = skb->h.icmph->type; | 272 | serr->ee.ee_type = skb->h.icmph->type; |
273 | serr->ee.ee_code = skb->h.icmph->code; | 273 | serr->ee.ee_code = skb->h.icmph->code; |
274 | serr->ee.ee_pad = 0; | 274 | serr->ee.ee_pad = 0; |
275 | serr->ee.ee_info = info; | 275 | serr->ee.ee_info = info; |
@@ -301,10 +301,10 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
301 | skb->nh.iph = iph; | 301 | skb->nh.iph = iph; |
302 | iph->daddr = daddr; | 302 | iph->daddr = daddr; |
303 | 303 | ||
304 | serr = SKB_EXT_ERR(skb); | 304 | serr = SKB_EXT_ERR(skb); |
305 | serr->ee.ee_errno = err; | 305 | serr->ee.ee_errno = err; |
306 | serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; | 306 | serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; |
307 | serr->ee.ee_type = 0; | 307 | serr->ee.ee_type = 0; |
308 | serr->ee.ee_code = 0; | 308 | serr->ee.ee_code = 0; |
309 | serr->ee.ee_pad = 0; | 309 | serr->ee.ee_pad = 0; |
310 | serr->ee.ee_info = info; | 310 | serr->ee.ee_info = info; |
@@ -319,7 +319,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
319 | kfree_skb(skb); | 319 | kfree_skb(skb); |
320 | } | 320 | } |
321 | 321 | ||
322 | /* | 322 | /* |
323 | * Handle MSG_ERRQUEUE | 323 | * Handle MSG_ERRQUEUE |
324 | */ | 324 | */ |
325 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | 325 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) |
@@ -391,7 +391,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
391 | } else | 391 | } else |
392 | spin_unlock_bh(&sk->sk_error_queue.lock); | 392 | spin_unlock_bh(&sk->sk_error_queue.lock); |
393 | 393 | ||
394 | out_free_skb: | 394 | out_free_skb: |
395 | kfree_skb(skb); | 395 | kfree_skb(skb); |
396 | out: | 396 | out: |
397 | return err; | 397 | return err; |
@@ -409,15 +409,15 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
409 | struct inet_sock *inet = inet_sk(sk); | 409 | struct inet_sock *inet = inet_sk(sk); |
410 | int val=0,err; | 410 | int val=0,err; |
411 | 411 | ||
412 | if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | | 412 | if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | |
413 | (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | | 413 | (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | |
414 | (1<<IP_RETOPTS) | (1<<IP_TOS) | | 414 | (1<<IP_RETOPTS) | (1<<IP_TOS) | |
415 | (1<<IP_TTL) | (1<<IP_HDRINCL) | | 415 | (1<<IP_TTL) | (1<<IP_HDRINCL) | |
416 | (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | | 416 | (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | |
417 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | | 417 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | |
418 | (1<<IP_PASSSEC))) || | 418 | (1<<IP_PASSSEC))) || |
419 | optname == IP_MULTICAST_TTL || | 419 | optname == IP_MULTICAST_TTL || |
420 | optname == IP_MULTICAST_LOOP) { | 420 | optname == IP_MULTICAST_LOOP) { |
421 | if (optlen >= sizeof(int)) { | 421 | if (optlen >= sizeof(int)) { |
422 | if (get_user(val, (int __user *) optval)) | 422 | if (get_user(val, (int __user *) optval)) |
423 | return -EFAULT; | 423 | return -EFAULT; |
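Editor's note: do_ip_setsockopt() above tests whether optname belongs to the set of int-valued options with a bitmask — 1<<optname ANDed against the OR of 1<<opt for each member. That only works for option numbers below the word width, which is presumably why IP_MULTICAST_TTL and IP_MULTICAST_LOOP are listed as separate comparisons after the mask. A minimal sketch; the option numbers below are illustrative.

#include <stdio.h>

/* Small option numbers, in the spirit of <linux/in.h> (illustrative values). */
enum { OPT_TOS = 1, OPT_TTL = 2, OPT_HDRINCL = 3, OPT_RECVERR = 11, OPT_BIG = 33 };

/* Set of int-valued options, encoded as a bitmask of their numbers.
 * Only valid for option numbers below 32, so the large one is tested
 * with an explicit comparison instead, like IP_MULTICAST_TTL/LOOP. */
#define INT_OPTS ((1u << OPT_TOS) | (1u << OPT_TTL) | \
                  (1u << OPT_HDRINCL) | (1u << OPT_RECVERR))

static int takes_int(int optname)
{
    if (optname >= 0 && optname < 32 && ((1u << optname) & INT_OPTS))
        return 1;
    return optname == OPT_BIG;
}

int main(void)
{
    printf("TOS: %d\n", takes_int(OPT_TOS));
    printf("33:  %d\n", takes_int(OPT_BIG));
    printf("20:  %d\n", takes_int(20));
    return 0;
}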
@@ -511,7 +511,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
511 | val &= ~3; | 511 | val &= ~3; |
512 | val |= inet->tos & 3; | 512 | val |= inet->tos & 3; |
513 | } | 513 | } |
514 | if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP && | 514 | if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP && |
515 | !capable(CAP_NET_ADMIN)) { | 515 | !capable(CAP_NET_ADMIN)) { |
516 | err = -EPERM; | 516 | err = -EPERM; |
517 | break; | 517 | break; |
@@ -519,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
519 | if (inet->tos != val) { | 519 | if (inet->tos != val) { |
520 | inet->tos = val; | 520 | inet->tos = val; |
521 | sk->sk_priority = rt_tos2priority(val); | 521 | sk->sk_priority = rt_tos2priority(val); |
522 | sk_dst_reset(sk); | 522 | sk_dst_reset(sk); |
523 | } | 523 | } |
524 | break; | 524 | break; |
525 | case IP_TTL: | 525 | case IP_TTL: |
@@ -556,13 +556,13 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
556 | if (val < 0 || val > 255) | 556 | if (val < 0 || val > 255) |
557 | goto e_inval; | 557 | goto e_inval; |
558 | inet->mc_ttl = val; | 558 | inet->mc_ttl = val; |
559 | break; | 559 | break; |
560 | case IP_MULTICAST_LOOP: | 560 | case IP_MULTICAST_LOOP: |
561 | if (optlen<1) | 561 | if (optlen<1) |
562 | goto e_inval; | 562 | goto e_inval; |
563 | inet->mc_loop = !!val; | 563 | inet->mc_loop = !!val; |
564 | break; | 564 | break; |
565 | case IP_MULTICAST_IF: | 565 | case IP_MULTICAST_IF: |
566 | { | 566 | { |
567 | struct ip_mreqn mreq; | 567 | struct ip_mreqn mreq; |
568 | struct net_device *dev = NULL; | 568 | struct net_device *dev = NULL; |
@@ -616,7 +616,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
616 | } | 616 | } |
617 | 617 | ||
618 | case IP_ADD_MEMBERSHIP: | 618 | case IP_ADD_MEMBERSHIP: |
619 | case IP_DROP_MEMBERSHIP: | 619 | case IP_DROP_MEMBERSHIP: |
620 | { | 620 | { |
621 | struct ip_mreqn mreq; | 621 | struct ip_mreqn mreq; |
622 | 622 | ||
@@ -629,7 +629,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
629 | } else { | 629 | } else { |
630 | memset(&mreq, 0, sizeof(mreq)); | 630 | memset(&mreq, 0, sizeof(mreq)); |
631 | if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq))) | 631 | if (copy_from_user(&mreq,optval,sizeof(struct ip_mreq))) |
632 | break; | 632 | break; |
633 | } | 633 | } |
634 | 634 | ||
635 | if (optname == IP_ADD_MEMBERSHIP) | 635 | if (optname == IP_ADD_MEMBERSHIP) |
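The memset-plus-short-copy fallback above is what lets callers pass either the newer struct ip_mreqn or the legacy, shorter struct ip_mreq. A sketch of the ip_mreqn form; the group address and interface index are placeholders:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Join a group on a specific interface by index; the kernel also accepts
 * the shorter struct ip_mreq, hence the optlen check in the handler. */
int join_group(int fd, int ifindex)
{
        struct ip_mreqn mreq;

        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
        mreq.imr_address.s_addr = htonl(INADDR_ANY);
        mreq.imr_ifindex = ifindex;

        return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}
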
@@ -714,7 +714,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
714 | break; | 714 | break; |
715 | } | 715 | } |
716 | case MCAST_JOIN_GROUP: | 716 | case MCAST_JOIN_GROUP: |
717 | case MCAST_LEAVE_GROUP: | 717 | case MCAST_LEAVE_GROUP: |
718 | { | 718 | { |
719 | struct group_req greq; | 719 | struct group_req greq; |
720 | struct sockaddr_in *psin; | 720 | struct sockaddr_in *psin; |
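MCAST_JOIN_GROUP is the protocol-independent spelling of the same join: the group address travels in a sockaddr_storage and the interface is chosen by index only. A hedged sketch, with the group address again a placeholder:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Protocol-independent join using struct group_req. */
int join_group_gr(int fd, unsigned int ifindex)
{
        struct group_req greq;
        struct sockaddr_in *psin = (struct sockaddr_in *)&greq.gr_group;

        memset(&greq, 0, sizeof(greq));
        greq.gr_interface = ifindex;
        psin->sin_family = AF_INET;
        psin->sin_addr.s_addr = inet_addr("239.1.1.1");

        return setsockopt(fd, IPPROTO_IP, MCAST_JOIN_GROUP, &greq, sizeof(greq));
}
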
@@ -858,16 +858,16 @@ mc_msf_out: | |||
858 | kfree(gsf); | 858 | kfree(gsf); |
859 | break; | 859 | break; |
860 | } | 860 | } |
861 | case IP_ROUTER_ALERT: | 861 | case IP_ROUTER_ALERT: |
862 | err = ip_ra_control(sk, val ? 1 : 0, NULL); | 862 | err = ip_ra_control(sk, val ? 1 : 0, NULL); |
863 | break; | 863 | break; |
864 | 864 | ||
865 | case IP_FREEBIND: | 865 | case IP_FREEBIND: |
866 | if (optlen<1) | 866 | if (optlen<1) |
867 | goto e_inval; | 867 | goto e_inval; |
868 | inet->freebind = !!val; | 868 | inet->freebind = !!val; |
869 | break; | 869 | break; |
870 | 870 | ||
871 | case IP_IPSEC_POLICY: | 871 | case IP_IPSEC_POLICY: |
872 | case IP_XFRM_POLICY: | 872 | case IP_XFRM_POLICY: |
873 | err = -EPERM; | 873 | err = -EPERM; |
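IP_FREEBIND, toggled just above, lets a socket bind to an address that is not yet configured on any local interface. A minimal sketch of the usual pattern; the address and port are illustrative:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Without IP_FREEBIND this bind() would fail with EADDRNOTAVAIL
 * until the address is actually configured. */
int bind_freely(int fd, const char *addr, unsigned short port)
{
        int one = 1;
        struct sockaddr_in sin;

        if (setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one)) < 0)
                return -1;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(port);
        sin.sin_addr.s_addr = inet_addr(addr);
        return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}
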
@@ -954,7 +954,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
954 | struct inet_sock *inet = inet_sk(sk); | 954 | struct inet_sock *inet = inet_sk(sk); |
955 | int val; | 955 | int val; |
956 | int len; | 956 | int len; |
957 | 957 | ||
958 | if(level!=SOL_IP) | 958 | if(level!=SOL_IP) |
959 | return -EOPNOTSUPP; | 959 | return -EOPNOTSUPP; |
960 | 960 | ||
@@ -969,7 +969,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
969 | return -EFAULT; | 969 | return -EFAULT; |
970 | if(len < 0) | 970 | if(len < 0) |
971 | return -EINVAL; | 971 | return -EINVAL; |
972 | 972 | ||
973 | lock_sock(sk); | 973 | lock_sock(sk); |
974 | 974 | ||
975 | switch(optname) { | 975 | switch(optname) { |
@@ -984,7 +984,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
984 | inet->opt->optlen); | 984 | inet->opt->optlen); |
985 | release_sock(sk); | 985 | release_sock(sk); |
986 | 986 | ||
987 | if (opt->optlen == 0) | 987 | if (opt->optlen == 0) |
988 | return put_user(0, optlen); | 988 | return put_user(0, optlen); |
989 | 989 | ||
990 | ip_options_undo(opt); | 990 | ip_options_undo(opt); |
@@ -1059,8 +1059,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1059 | addr.s_addr = inet->mc_addr; | 1059 | addr.s_addr = inet->mc_addr; |
1060 | release_sock(sk); | 1060 | release_sock(sk); |
1061 | 1061 | ||
1062 | if(put_user(len, optlen)) | 1062 | if(put_user(len, optlen)) |
1063 | return -EFAULT; | 1063 | return -EFAULT; |
1064 | if(copy_to_user(optval, &addr, len)) | 1064 | if(copy_to_user(optval, &addr, len)) |
1065 | return -EFAULT; | 1065 | return -EFAULT; |
1066 | return 0; | 1066 | return 0; |
@@ -1101,7 +1101,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1101 | release_sock(sk); | 1101 | release_sock(sk); |
1102 | return err; | 1102 | return err; |
1103 | } | 1103 | } |
1104 | case IP_PKTOPTIONS: | 1104 | case IP_PKTOPTIONS: |
1105 | { | 1105 | { |
1106 | struct msghdr msg; | 1106 | struct msghdr msg; |
1107 | 1107 | ||
@@ -1129,15 +1129,15 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1129 | len -= msg.msg_controllen; | 1129 | len -= msg.msg_controllen; |
1130 | return put_user(len, optlen); | 1130 | return put_user(len, optlen); |
1131 | } | 1131 | } |
1132 | case IP_FREEBIND: | 1132 | case IP_FREEBIND: |
1133 | val = inet->freebind; | 1133 | val = inet->freebind; |
1134 | break; | 1134 | break; |
1135 | default: | 1135 | default: |
1136 | release_sock(sk); | 1136 | release_sock(sk); |
1137 | return -ENOPROTOOPT; | 1137 | return -ENOPROTOOPT; |
1138 | } | 1138 | } |
1139 | release_sock(sk); | 1139 | release_sock(sk); |
1140 | 1140 | ||
1141 | if (len < sizeof(int) && len > 0 && val>=0 && val<255) { | 1141 | if (len < sizeof(int) && len > 0 && val>=0 && val<255) { |
1142 | unsigned char ucval = (unsigned char)val; | 1142 | unsigned char ucval = (unsigned char)val; |
1143 | len = 1; | 1143 | len = 1; |
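The tail of do_ip_getsockopt above is why small values can be read back through a 1-byte buffer: when len is below sizeof(int) and the value fits under 255, a single ucval is returned and len is forced to 1. A userspace sketch, with the fd assumed valid:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Read the unicast TTL through a 1-byte buffer; an int-sized buffer
 * is the normal choice, this just exercises the ucval fallback. */
int query_ttl(int fd)
{
        unsigned char ttl;
        socklen_t len = sizeof(ttl);

        if (getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len) < 0)
                return -1;
        printf("unicast ttl: %u (returned %u byte(s))\n", ttl, (unsigned)len);
        return 0;
}
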
@@ -1168,7 +1168,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1168 | && (optname < MRT_BASE || optname > MRT_BASE+10) | 1168 | && (optname < MRT_BASE || optname > MRT_BASE+10) |
1169 | #endif | 1169 | #endif |
1170 | ) { | 1170 | ) { |
1171 | int len; | 1171 | int len; |
1172 | 1172 | ||
1173 | if(get_user(len,optlen)) | 1173 | if(get_user(len,optlen)) |
1174 | return -EFAULT; | 1174 | return -EFAULT; |
@@ -1197,7 +1197,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1197 | && (optname < MRT_BASE || optname > MRT_BASE+10) | 1197 | && (optname < MRT_BASE || optname > MRT_BASE+10) |
1198 | #endif | 1198 | #endif |
1199 | ) { | 1199 | ) { |
1200 | int len; | 1200 | int len; |
1201 | 1201 | ||
1202 | if (get_user(len, optlen)) | 1202 | if (get_user(len, optlen)) |
1203 | return -EFAULT; | 1203 | return -EFAULT; |
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 3839b706142e..aa704b88f014 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the Free | 7 | * under the terms of the GNU General Public License as published by the Free |
8 | * Software Foundation; either version 2 of the License, or (at your option) | 8 | * Software Foundation; either version 2 of the License, or (at your option) |
9 | * any later version. | 9 | * any later version. |
10 | * | 10 | * |
11 | * Todo: | 11 | * Todo: |
@@ -48,7 +48,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) | |||
48 | u8 *start, *scratch; | 48 | u8 *start, *scratch; |
49 | struct crypto_comp *tfm; | 49 | struct crypto_comp *tfm; |
50 | int cpu; | 50 | int cpu; |
51 | 51 | ||
52 | plen = skb->len; | 52 | plen = skb->len; |
53 | dlen = IPCOMP_SCRATCH_SIZE; | 53 | dlen = IPCOMP_SCRATCH_SIZE; |
54 | start = skb->data; | 54 | start = skb->data; |
@@ -69,11 +69,11 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) | |||
69 | err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); | 69 | err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); |
70 | if (err) | 70 | if (err) |
71 | goto out; | 71 | goto out; |
72 | 72 | ||
73 | skb->truesize += dlen - plen; | 73 | skb->truesize += dlen - plen; |
74 | __skb_put(skb, dlen - plen); | 74 | __skb_put(skb, dlen - plen); |
75 | memcpy(skb->data, scratch, dlen); | 75 | memcpy(skb->data, scratch, dlen); |
76 | out: | 76 | out: |
77 | put_cpu(); | 77 | put_cpu(); |
78 | return err; | 78 | return err; |
79 | } | 79 | } |
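ipcomp_decompress() above inflates into a fixed per-CPU scratch area first and only grows the skb once the real length is known. A rough userspace analogue of that bounded-scratch pattern, using zlib rather than the kernel's crypto_comp API; the buffer size and zlib framing are assumptions for illustration:

#include <stdlib.h>
#include <string.h>
#include <zlib.h>

#define SCRATCH_SIZE 65400   /* mirrors IPCOMP_SCRATCH_SIZE */

/* Decompress into a fixed bound first, fail if it does not fit,
 * and only then allocate a buffer of the exact final size. */
unsigned char *decompress_bounded(const unsigned char *in, size_t inlen,
                                  size_t *outlen)
{
        static unsigned char scratch[SCRATCH_SIZE];
        uLongf dlen = SCRATCH_SIZE;
        unsigned char *out;

        if (uncompress(scratch, &dlen, in, inlen) != Z_OK)
                return NULL;            /* corrupt, or larger than the bound */

        out = malloc(dlen);
        if (out)
                memcpy(out, scratch, dlen);
        *outlen = dlen;
        return out;
}
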
@@ -85,11 +85,11 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
85 | struct ip_comp_hdr *ipch; | 85 | struct ip_comp_hdr *ipch; |
86 | 86 | ||
87 | if (skb_linearize_cow(skb)) | 87 | if (skb_linearize_cow(skb)) |
88 | goto out; | 88 | goto out; |
89 | 89 | ||
90 | skb->ip_summed = CHECKSUM_NONE; | 90 | skb->ip_summed = CHECKSUM_NONE; |
91 | 91 | ||
92 | /* Remove ipcomp header and decompress original payload */ | 92 | /* Remove ipcomp header and decompress original payload */ |
93 | iph = skb->nh.iph; | 93 | iph = skb->nh.iph; |
94 | ipch = (void *)skb->data; | 94 | ipch = (void *)skb->data; |
95 | iph->protocol = ipch->nexthdr; | 95 | iph->protocol = ipch->nexthdr; |
@@ -97,7 +97,7 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) | |||
97 | __skb_pull(skb, sizeof(*ipch)); | 97 | __skb_pull(skb, sizeof(*ipch)); |
98 | err = ipcomp_decompress(x, skb); | 98 | err = ipcomp_decompress(x, skb); |
99 | 99 | ||
100 | out: | 100 | out: |
101 | return err; | 101 | return err; |
102 | } | 102 | } |
103 | 103 | ||
@@ -109,7 +109,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) | |||
109 | u8 *start, *scratch; | 109 | u8 *start, *scratch; |
110 | struct crypto_comp *tfm; | 110 | struct crypto_comp *tfm; |
111 | int cpu; | 111 | int cpu; |
112 | 112 | ||
113 | ihlen = iph->ihl * 4; | 113 | ihlen = iph->ihl * 4; |
114 | plen = skb->len - ihlen; | 114 | plen = skb->len - ihlen; |
115 | dlen = IPCOMP_SCRATCH_SIZE; | 115 | dlen = IPCOMP_SCRATCH_SIZE; |
@@ -127,14 +127,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) | |||
127 | err = -EMSGSIZE; | 127 | err = -EMSGSIZE; |
128 | goto out; | 128 | goto out; |
129 | } | 129 | } |
130 | 130 | ||
131 | memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); | 131 | memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); |
132 | put_cpu(); | 132 | put_cpu(); |
133 | 133 | ||
134 | pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr)); | 134 | pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr)); |
135 | return 0; | 135 | return 0; |
136 | 136 | ||
137 | out: | 137 | out: |
138 | put_cpu(); | 138 | put_cpu(); |
139 | return err; | 139 | return err; |
140 | } | 140 | } |
@@ -157,7 +157,7 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
157 | 157 | ||
158 | if (skb_linearize_cow(skb)) | 158 | if (skb_linearize_cow(skb)) |
159 | goto out_ok; | 159 | goto out_ok; |
160 | 160 | ||
161 | err = ipcomp_compress(x, skb); | 161 | err = ipcomp_compress(x, skb); |
162 | iph = skb->nh.iph; | 162 | iph = skb->nh.iph; |
163 | 163 | ||
@@ -194,7 +194,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info) | |||
194 | 194 | ||
195 | spi = htonl(ntohs(ipch->cpi)); | 195 | spi = htonl(ntohs(ipch->cpi)); |
196 | x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, | 196 | x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, |
197 | spi, IPPROTO_COMP, AF_INET); | 197 | spi, IPPROTO_COMP, AF_INET); |
198 | if (!x) | 198 | if (!x) |
199 | return; | 199 | return; |
200 | NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n", | 200 | NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n", |
@@ -202,12 +202,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info) | |||
202 | xfrm_state_put(x); | 202 | xfrm_state_put(x); |
203 | } | 203 | } |
204 | 204 | ||
205 | /* We always hold one tunnel user reference to indicate a tunnel */ | 205 | /* We always hold one tunnel user reference to indicate a tunnel */ |
206 | static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) | 206 | static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) |
207 | { | 207 | { |
208 | struct xfrm_state *t; | 208 | struct xfrm_state *t; |
209 | u8 mode = XFRM_MODE_TUNNEL; | 209 | u8 mode = XFRM_MODE_TUNNEL; |
210 | 210 | ||
211 | t = xfrm_state_alloc(); | 211 | t = xfrm_state_alloc(); |
212 | if (t == NULL) | 212 | if (t == NULL) |
213 | goto out; | 213 | goto out; |
@@ -247,7 +247,7 @@ static int ipcomp_tunnel_attach(struct xfrm_state *x) | |||
247 | struct xfrm_state *t; | 247 | struct xfrm_state *t; |
248 | 248 | ||
249 | t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4, | 249 | t = xfrm_state_lookup((xfrm_address_t *)&x->id.daddr.a4, |
250 | x->props.saddr.a4, IPPROTO_IPIP, AF_INET); | 250 | x->props.saddr.a4, IPPROTO_IPIP, AF_INET); |
251 | if (!t) { | 251 | if (!t) { |
252 | t = ipcomp_tunnel_create(x); | 252 | t = ipcomp_tunnel_create(x); |
253 | if (!t) { | 253 | if (!t) { |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index afa60b9a003f..ba882bec317a 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -12,7 +12,7 @@ | |||
12 | * BOOTP rewritten to construct and analyse packets itself instead | 12 | * BOOTP rewritten to construct and analyse packets itself instead |
13 | * of misusing the IP layer. num_bugs_causing_wrong_arp_replies--; | 13 | * of misusing the IP layer. num_bugs_causing_wrong_arp_replies--; |
14 | * -- MJ, December 1998 | 14 | * -- MJ, December 1998 |
15 | * | 15 | * |
16 | * Fixed ip_auto_config_setup calling at startup in the new "Linker Magic" | 16 | * Fixed ip_auto_config_setup calling at startup in the new "Linker Magic" |
17 | * initialization scheme. | 17 | * initialization scheme. |
18 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 08/11/1999 | 18 | * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 08/11/1999 |
@@ -98,8 +98,8 @@ | |||
98 | #define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */ | 98 | #define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */ |
99 | #define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */ | 99 | #define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */ |
100 | #define CONF_TIMEOUT_MAX (HZ*30) /* Maximum allowed timeout */ | 100 | #define CONF_TIMEOUT_MAX (HZ*30) /* Maximum allowed timeout */ |
101 | #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers | 101 | #define CONF_NAMESERVERS_MAX 3 /* Maximum number of nameservers |
102 | - '3' from resolv.h */ | 102 | - '3' from resolv.h */ |
103 | 103 | ||
104 | #define NONE __constant_htonl(INADDR_NONE) | 104 | #define NONE __constant_htonl(INADDR_NONE) |
105 | 105 | ||
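CONF_TIMEOUT_MULT above is deliberately written as a bare *7/4 so it can be spliced into an assignment; the retry delay grows geometrically and is clamped at CONF_TIMEOUT_MAX. A small standalone sketch of that schedule, where HZ and the starting delay are assumed values because CONF_TIMEOUT_BASE is not shown in this hunk:

#include <stdio.h>

#define HZ                  100
#define CONF_TIMEOUT_MULT   *7/4
#define CONF_TIMEOUT_MAX    (HZ*30)

int main(void)
{
        unsigned long timeout = HZ;             /* assumed starting delay */
        int i;

        for (i = 0; i < 8; i++) {
                printf("attempt %d: %lu jiffies\n", i, timeout);
                timeout = timeout CONF_TIMEOUT_MULT;   /* expands to *7/4 */
                if (timeout > CONF_TIMEOUT_MAX)
                        timeout = CONF_TIMEOUT_MAX;
        }
        return 0;
}
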
@@ -365,7 +365,7 @@ static int __init ic_defaults(void) | |||
365 | * At this point we have no userspace running so need not | 365 | * At this point we have no userspace running so need not |
366 | * claim locks on system_utsname | 366 | * claim locks on system_utsname |
367 | */ | 367 | */ |
368 | 368 | ||
369 | if (!ic_host_name_set) | 369 | if (!ic_host_name_set) |
370 | sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); | 370 | sprintf(init_utsname()->nodename, "%u.%u.%u.%u", NIPQUAD(ic_myaddr)); |
371 | 371 | ||
@@ -650,9 +650,9 @@ static void __init ic_bootp_init_ext(u8 *e) | |||
650 | *e++ = 40; | 650 | *e++ = 40; |
651 | e += 40; | 651 | e += 40; |
652 | 652 | ||
653 | *e++ = 57; /* set extension buffer size for reply */ | 653 | *e++ = 57; /* set extension buffer size for reply */ |
654 | *e++ = 2; | 654 | *e++ = 2; |
655 | *e++ = 1; /* 128+236+8+20+14, see dhcpd sources */ | 655 | *e++ = 1; /* 128+236+8+20+14, see dhcpd sources */ |
656 | *e++ = 150; | 656 | *e++ = 150; |
657 | 657 | ||
658 | *e++ = 255; /* End of the list */ | 658 | *e++ = 255; /* End of the list */ |
@@ -913,7 +913,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str | |||
913 | /* Parse extensions */ | 913 | /* Parse extensions */ |
914 | if (ext_len >= 4 && | 914 | if (ext_len >= 4 && |
915 | !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */ | 915 | !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */ |
916 | u8 *end = (u8 *) b + ntohs(b->iph.tot_len); | 916 | u8 *end = (u8 *) b + ntohs(b->iph.tot_len); |
917 | u8 *ext; | 917 | u8 *ext; |
918 | 918 | ||
919 | #ifdef IPCONFIG_DHCP | 919 | #ifdef IPCONFIG_DHCP |
@@ -1020,7 +1020,7 @@ drop: | |||
1020 | kfree_skb(skb); | 1020 | kfree_skb(skb); |
1021 | 1021 | ||
1022 | return 0; | 1022 | return 0; |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | 1025 | ||
1026 | #endif | 1026 | #endif |
@@ -1080,7 +1080,7 @@ static int __init ic_dynamic(void) | |||
1080 | * seems to be a terrible waste of CPU time, but actually there is | 1080 | * seems to be a terrible waste of CPU time, but actually there is |
1081 | * only one process running at all, so we don't need to use any | 1081 | * only one process running at all, so we don't need to use any |
1082 | * scheduler functions. | 1082 | * scheduler functions. |
1083 | * [Actually we could now, but the nothing else running note still | 1083 | * [Actually we could now, but the nothing else running note still |
1084 | * applies.. - AC] | 1084 | * applies.. - AC] |
1085 | */ | 1085 | */ |
1086 | printk(KERN_NOTICE "Sending %s%s%s requests .", | 1086 | printk(KERN_NOTICE "Sending %s%s%s requests .", |
@@ -1156,7 +1156,7 @@ static int __init ic_dynamic(void) | |||
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | printk("IP-Config: Got %s answer from %u.%u.%u.%u, ", | 1158 | printk("IP-Config: Got %s answer from %u.%u.%u.%u, ", |
1159 | ((ic_got_reply & IC_RARP) ? "RARP" | 1159 | ((ic_got_reply & IC_RARP) ? "RARP" |
1160 | : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), | 1160 | : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"), |
1161 | NIPQUAD(ic_servaddr)); | 1161 | NIPQUAD(ic_servaddr)); |
1162 | printk("my address is %u.%u.%u.%u\n", NIPQUAD(ic_myaddr)); | 1162 | printk("my address is %u.%u.%u.%u\n", NIPQUAD(ic_myaddr)); |
@@ -1286,7 +1286,7 @@ static int __init ip_auto_config(void) | |||
1286 | #endif | 1286 | #endif |
1287 | ic_first_dev->next) { | 1287 | ic_first_dev->next) { |
1288 | #ifdef IPCONFIG_DYNAMIC | 1288 | #ifdef IPCONFIG_DYNAMIC |
1289 | 1289 | ||
1290 | int retries = CONF_OPEN_RETRIES; | 1290 | int retries = CONF_OPEN_RETRIES; |
1291 | 1291 | ||
1292 | if (ic_dynamic() < 0) { | 1292 | if (ic_dynamic() < 0) { |
@@ -1308,14 +1308,14 @@ static int __init ip_auto_config(void) | |||
1308 | */ | 1308 | */ |
1309 | #ifdef CONFIG_ROOT_NFS | 1309 | #ifdef CONFIG_ROOT_NFS |
1310 | if (ROOT_DEV == Root_NFS) { | 1310 | if (ROOT_DEV == Root_NFS) { |
1311 | printk(KERN_ERR | 1311 | printk(KERN_ERR |
1312 | "IP-Config: Retrying forever (NFS root)...\n"); | 1312 | "IP-Config: Retrying forever (NFS root)...\n"); |
1313 | goto try_try_again; | 1313 | goto try_try_again; |
1314 | } | 1314 | } |
1315 | #endif | 1315 | #endif |
1316 | 1316 | ||
1317 | if (--retries) { | 1317 | if (--retries) { |
1318 | printk(KERN_ERR | 1318 | printk(KERN_ERR |
1319 | "IP-Config: Reopening network devices...\n"); | 1319 | "IP-Config: Reopening network devices...\n"); |
1320 | goto try_try_again; | 1320 | goto try_try_again; |
1321 | } | 1321 | } |
@@ -1443,8 +1443,8 @@ static int __init ip_auto_config_setup(char *addrs) | |||
1443 | 1443 | ||
1444 | ic_set_manually = 1; | 1444 | ic_set_manually = 1; |
1445 | 1445 | ||
1446 | ic_enable = (*addrs && | 1446 | ic_enable = (*addrs && |
1447 | (strcmp(addrs, "off") != 0) && | 1447 | (strcmp(addrs, "off") != 0) && |
1448 | (strcmp(addrs, "none") != 0)); | 1448 | (strcmp(addrs, "none") != 0)); |
1449 | if (!ic_enable) | 1449 | if (!ic_enable) |
1450 | return 1; | 1450 | return 1; |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index da8bbd20c7ed..475bcd1e4181 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux NET3: IP/IP protocol decoder. | 2 | * Linux NET3: IP/IP protocol decoder. |
3 | * | 3 | * |
4 | * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $ | 4 | * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $ |
5 | * | 5 | * |
@@ -35,14 +35,14 @@ | |||
35 | Thanks for the great code! | 35 | Thanks for the great code! |
36 | 36 | ||
37 | -Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 | 37 | -Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 |
38 | 38 | ||
39 | Minor tweaks: | 39 | Minor tweaks: |
40 | Cleaned up the code a little and added some pre-1.3.0 tweaks. | 40 | Cleaned up the code a little and added some pre-1.3.0 tweaks. |
41 | dev->hard_header/hard_header_len changed to use no headers. | 41 | dev->hard_header/hard_header_len changed to use no headers. |
42 | Comments/bracketing tweaked. | 42 | Comments/bracketing tweaked. |
43 | Made the tunnels use dev->name not tunnel: when error reporting. | 43 | Made the tunnels use dev->name not tunnel: when error reporting. |
44 | Added tx_dropped stat | 44 | Added tx_dropped stat |
45 | 45 | ||
46 | -Alan Cox (Alan.Cox@linux.org) 21 March 95 | 46 | -Alan Cox (Alan.Cox@linux.org) 21 March 95 |
47 | 47 | ||
48 | Reworked: | 48 | Reworked: |
@@ -52,7 +52,7 @@ | |||
52 | Note: There is currently no firewall or ICMP handling done. | 52 | Note: There is currently no firewall or ICMP handling done. |
53 | 53 | ||
54 | -Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96 | 54 | -Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96 |
55 | 55 | ||
56 | */ | 56 | */ |
57 | 57 | ||
58 | /* Things I wish I had known when writing the tunnel driver: | 58 | /* Things I wish I had known when writing the tunnel driver: |
@@ -75,7 +75,7 @@ | |||
75 | "allocated" with skb_put(). You can then write up to skb->len | 75 | "allocated" with skb_put(). You can then write up to skb->len |
76 | bytes to that buffer. If you need more, you can call skb_put() | 76 | bytes to that buffer. If you need more, you can call skb_put() |
77 | again with the additional amount of space you need. You can | 77 | again with the additional amount of space you need. You can |
78 | find out how much more space you can allocate by calling | 78 | find out how much more space you can allocate by calling |
79 | "skb_tailroom(skb)". | 79 | "skb_tailroom(skb)". |
80 | Now, to add header space, call "skb_push(skb, header_len)". | 80 | Now, to add header space, call "skb_push(skb, header_len)". |
81 | This creates space at the beginning of the buffer and returns | 81 | This creates space at the beginning of the buffer and returns |
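The driver note above is a compact description of the skb headroom API. A schematic kernel-side sketch of the push pattern it describes; field values and error handling are trimmed, and this is not taken from ipip.c itself:

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Make room in front of the existing data, then push an outer IPv4
 * header there, as the comment above explains. */
static struct sk_buff *wrap_in_outer_header(struct sk_buff *skb)
{
        struct iphdr *iph;
        int need = sizeof(struct iphdr);

        if (skb_headroom(skb) < need &&
            pskb_expand_head(skb, need - skb_headroom(skb), 0, GFP_ATOMIC))
                return NULL;

        iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));
        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->tot_len = htons(skb->len);
        return skb;
}
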
@@ -92,7 +92,7 @@ | |||
92 | For comments look at net/ipv4/ip_gre.c --ANK | 92 | For comments look at net/ipv4/ip_gre.c --ANK |
93 | */ | 93 | */ |
94 | 94 | ||
95 | 95 | ||
96 | #include <linux/capability.h> | 96 | #include <linux/capability.h> |
97 | #include <linux/module.h> | 97 | #include <linux/module.h> |
98 | #include <linux/types.h> | 98 | #include <linux/types.h> |
@@ -607,7 +607,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
607 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); | 607 | struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); |
608 | if (!new_skb) { | 608 | if (!new_skb) { |
609 | ip_rt_put(rt); | 609 | ip_rt_put(rt); |
610 | stats->tx_dropped++; | 610 | stats->tx_dropped++; |
611 | dev_kfree_skb(skb); | 611 | dev_kfree_skb(skb); |
612 | tunnel->recursion--; | 612 | tunnel->recursion--; |
613 | return 0; | 613 | return 0; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ecb5422ea237..a099000cd132 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -241,7 +241,7 @@ failure: | |||
241 | /* | 241 | /* |
242 | * Delete a VIF entry | 242 | * Delete a VIF entry |
243 | */ | 243 | */ |
244 | 244 | ||
245 | static int vif_delete(int vifi) | 245 | static int vif_delete(int vifi) |
246 | { | 246 | { |
247 | struct vif_device *v; | 247 | struct vif_device *v; |
@@ -409,7 +409,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) | |||
409 | return -ENOBUFS; | 409 | return -ENOBUFS; |
410 | break; | 410 | break; |
411 | #endif | 411 | #endif |
412 | case VIFF_TUNNEL: | 412 | case VIFF_TUNNEL: |
413 | dev = ipmr_new_tunnel(vifc); | 413 | dev = ipmr_new_tunnel(vifc); |
414 | if (!dev) | 414 | if (!dev) |
415 | return -ENOBUFS; | 415 | return -ENOBUFS; |
@@ -501,7 +501,7 @@ static struct mfc_cache *ipmr_cache_alloc_unres(void) | |||
501 | /* | 501 | /* |
502 | * A cache entry has gone into a resolved state from queued | 502 | * A cache entry has gone into a resolved state from queued |
503 | */ | 503 | */ |
504 | 504 | ||
505 | static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | 505 | static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) |
506 | { | 506 | { |
507 | struct sk_buff *skb; | 507 | struct sk_buff *skb; |
@@ -538,7 +538,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c) | |||
538 | * | 538 | * |
539 | * Called under mrt_lock. | 539 | * Called under mrt_lock. |
540 | */ | 540 | */ |
541 | 541 | ||
542 | static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | 542 | static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) |
543 | { | 543 | { |
544 | struct sk_buff *skb; | 544 | struct sk_buff *skb; |
@@ -569,13 +569,13 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
569 | memcpy(msg, pkt->nh.raw, sizeof(struct iphdr)); | 569 | memcpy(msg, pkt->nh.raw, sizeof(struct iphdr)); |
570 | msg->im_msgtype = IGMPMSG_WHOLEPKT; | 570 | msg->im_msgtype = IGMPMSG_WHOLEPKT; |
571 | msg->im_mbz = 0; | 571 | msg->im_mbz = 0; |
572 | msg->im_vif = reg_vif_num; | 572 | msg->im_vif = reg_vif_num; |
573 | skb->nh.iph->ihl = sizeof(struct iphdr) >> 2; | 573 | skb->nh.iph->ihl = sizeof(struct iphdr) >> 2; |
574 | skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr)); | 574 | skb->nh.iph->tot_len = htons(ntohs(pkt->nh.iph->tot_len) + sizeof(struct iphdr)); |
575 | } else | 575 | } else |
576 | #endif | 576 | #endif |
577 | { | 577 | { |
578 | 578 | ||
579 | /* | 579 | /* |
580 | * Copy the IP header | 580 | * Copy the IP header |
581 | */ | 581 | */ |
@@ -597,7 +597,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
597 | igmp->code = 0; | 597 | igmp->code = 0; |
598 | skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */ | 598 | skb->nh.iph->tot_len=htons(skb->len); /* Fix the length */ |
599 | skb->h.raw = skb->nh.raw; | 599 | skb->h.raw = skb->nh.raw; |
600 | } | 600 | } |
601 | 601 | ||
602 | if (mroute_socket == NULL) { | 602 | if (mroute_socket == NULL) { |
603 | kfree_skb(skb); | 603 | kfree_skb(skb); |
@@ -619,7 +619,7 @@ static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert) | |||
619 | /* | 619 | /* |
620 | * Queue a packet for resolution. It gets locked cache entry! | 620 | * Queue a packet for resolution. It gets locked cache entry! |
621 | */ | 621 | */ |
622 | 622 | ||
623 | static int | 623 | static int |
624 | ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | 624 | ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) |
625 | { | 625 | { |
@@ -657,7 +657,7 @@ ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb) | |||
657 | * Reflect first query at mrouted. | 657 | * Reflect first query at mrouted. |
658 | */ | 658 | */ |
659 | if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) { | 659 | if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) { |
660 | /* If the report failed throw the cache entry | 660 | /* If the report failed throw the cache entry |
661 | out - Brad Parker | 661 | out - Brad Parker |
662 | */ | 662 | */ |
663 | spin_unlock_bh(&mfc_unres_lock); | 663 | spin_unlock_bh(&mfc_unres_lock); |
@@ -783,11 +783,11 @@ static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock) | |||
783 | /* | 783 | /* |
784 | * Close the multicast socket, and clear the vif tables etc | 784 | * Close the multicast socket, and clear the vif tables etc |
785 | */ | 785 | */ |
786 | 786 | ||
787 | static void mroute_clean_tables(struct sock *sk) | 787 | static void mroute_clean_tables(struct sock *sk) |
788 | { | 788 | { |
789 | int i; | 789 | int i; |
790 | 790 | ||
791 | /* | 791 | /* |
792 | * Shut down all active vif entries | 792 | * Shut down all active vif entries |
793 | */ | 793 | */ |
@@ -854,13 +854,13 @@ static void mrtsock_destruct(struct sock *sk) | |||
854 | * that's how BSD mrouted happens to think. Maybe one day with a proper | 854 | * that's how BSD mrouted happens to think. Maybe one day with a proper |
855 | * MOSPF/PIM router set up we can clean this up. | 855 | * MOSPF/PIM router set up we can clean this up. |
856 | */ | 856 | */ |
857 | 857 | ||
858 | int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen) | 858 | int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen) |
859 | { | 859 | { |
860 | int ret; | 860 | int ret; |
861 | struct vifctl vif; | 861 | struct vifctl vif; |
862 | struct mfcctl mfc; | 862 | struct mfcctl mfc; |
863 | 863 | ||
864 | if(optname!=MRT_INIT) | 864 | if(optname!=MRT_INIT) |
865 | { | 865 | { |
866 | if(sk!=mroute_socket && !capable(CAP_NET_ADMIN)) | 866 | if(sk!=mroute_socket && !capable(CAP_NET_ADMIN)) |
@@ -901,7 +901,7 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
901 | if(optlen!=sizeof(vif)) | 901 | if(optlen!=sizeof(vif)) |
902 | return -EINVAL; | 902 | return -EINVAL; |
903 | if (copy_from_user(&vif,optval,sizeof(vif))) | 903 | if (copy_from_user(&vif,optval,sizeof(vif))) |
904 | return -EFAULT; | 904 | return -EFAULT; |
905 | if(vif.vifc_vifi >= MAXVIFS) | 905 | if(vif.vifc_vifi >= MAXVIFS) |
906 | return -ENFILE; | 906 | return -ENFILE; |
907 | rtnl_lock(); | 907 | rtnl_lock(); |
@@ -980,13 +980,13 @@ int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int opt | |||
980 | /* | 980 | /* |
981 | * Getsock opt support for the multicast routing system. | 981 | * Getsock opt support for the multicast routing system. |
982 | */ | 982 | */ |
983 | 983 | ||
984 | int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen) | 984 | int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen) |
985 | { | 985 | { |
986 | int olr; | 986 | int olr; |
987 | int val; | 987 | int val; |
988 | 988 | ||
989 | if(optname!=MRT_VERSION && | 989 | if(optname!=MRT_VERSION && |
990 | #ifdef CONFIG_IP_PIMSM | 990 | #ifdef CONFIG_IP_PIMSM |
991 | optname!=MRT_PIM && | 991 | optname!=MRT_PIM && |
992 | #endif | 992 | #endif |
@@ -999,7 +999,7 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u | |||
999 | olr = min_t(unsigned int, olr, sizeof(int)); | 999 | olr = min_t(unsigned int, olr, sizeof(int)); |
1000 | if (olr < 0) | 1000 | if (olr < 0) |
1001 | return -EINVAL; | 1001 | return -EINVAL; |
1002 | 1002 | ||
1003 | if(put_user(olr,optlen)) | 1003 | if(put_user(olr,optlen)) |
1004 | return -EFAULT; | 1004 | return -EFAULT; |
1005 | if(optname==MRT_VERSION) | 1005 | if(optname==MRT_VERSION) |
@@ -1018,19 +1018,19 @@ int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __u | |||
1018 | /* | 1018 | /* |
1019 | * The IP multicast ioctl support routines. | 1019 | * The IP multicast ioctl support routines. |
1020 | */ | 1020 | */ |
1021 | 1021 | ||
1022 | int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) | 1022 | int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) |
1023 | { | 1023 | { |
1024 | struct sioc_sg_req sr; | 1024 | struct sioc_sg_req sr; |
1025 | struct sioc_vif_req vr; | 1025 | struct sioc_vif_req vr; |
1026 | struct vif_device *vif; | 1026 | struct vif_device *vif; |
1027 | struct mfc_cache *c; | 1027 | struct mfc_cache *c; |
1028 | 1028 | ||
1029 | switch(cmd) | 1029 | switch(cmd) |
1030 | { | 1030 | { |
1031 | case SIOCGETVIFCNT: | 1031 | case SIOCGETVIFCNT: |
1032 | if (copy_from_user(&vr,arg,sizeof(vr))) | 1032 | if (copy_from_user(&vr,arg,sizeof(vr))) |
1033 | return -EFAULT; | 1033 | return -EFAULT; |
1034 | if(vr.vifi>=maxvif) | 1034 | if(vr.vifi>=maxvif) |
1035 | return -EINVAL; | 1035 | return -EINVAL; |
1036 | read_lock(&mrt_lock); | 1036 | read_lock(&mrt_lock); |
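SIOCGETVIFCNT, handled above, reports per-VIF packet and byte counters. A userspace sketch of the call; the mroute control socket is assumed to exist already, with MRT_INIT done elsewhere:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mroute.h>

/* Query the counters for one virtual interface. */
int print_vif_counters(int mroute_fd, vifi_t vifi)
{
        struct sioc_vif_req vr;

        vr.vifi = vifi;
        if (ioctl(mroute_fd, SIOCGETVIFCNT, &vr) < 0)
                return -1;
        printf("vif %u: in %lu pkts/%lu bytes, out %lu pkts/%lu bytes\n",
               (unsigned)vr.vifi, vr.icount, vr.ibytes, vr.ocount, vr.obytes);
        return 0;
}
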
@@ -1096,7 +1096,7 @@ static struct notifier_block ip_mr_notifier={ | |||
1096 | * This avoids tunnel drivers and other mess and gives us the speed so | 1096 | * This avoids tunnel drivers and other mess and gives us the speed so |
1097 | * important for multicast video. | 1097 | * important for multicast video. |
1098 | */ | 1098 | */ |
1099 | 1099 | ||
1100 | static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) | 1100 | static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) |
1101 | { | 1101 | { |
1102 | struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr)); | 1102 | struct iphdr *iph = (struct iphdr *)skb_push(skb,sizeof(struct iphdr)); |
@@ -1194,7 +1194,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1194 | encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; | 1194 | encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; |
1195 | 1195 | ||
1196 | if (skb_cow(skb, encap)) { | 1196 | if (skb_cow(skb, encap)) { |
1197 | ip_rt_put(rt); | 1197 | ip_rt_put(rt); |
1198 | goto out_free; | 1198 | goto out_free; |
1199 | } | 1199 | } |
1200 | 1200 | ||
@@ -1228,7 +1228,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1228 | * not mrouter) cannot join to more than one interface - it will | 1228 | * not mrouter) cannot join to more than one interface - it will |
1229 | * result in receiving multiple packets. | 1229 | * result in receiving multiple packets. |
1230 | */ | 1230 | */ |
1231 | NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev, | 1231 | NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev, |
1232 | ipmr_forward_finish); | 1232 | ipmr_forward_finish); |
1233 | return; | 1233 | return; |
1234 | 1234 | ||
@@ -1289,7 +1289,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1289 | large chunk of pimd to kernel. Ough... --ANK | 1289 | large chunk of pimd to kernel. Ough... --ANK |
1290 | */ | 1290 | */ |
1291 | (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && | 1291 | (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && |
1292 | time_after(jiffies, | 1292 | time_after(jiffies, |
1293 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { | 1293 | cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { |
1294 | cache->mfc_un.res.last_assert = jiffies; | 1294 | cache->mfc_un.res.last_assert = jiffies; |
1295 | ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF); | 1295 | ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF); |
@@ -1426,14 +1426,14 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
1426 | struct iphdr *encap; | 1426 | struct iphdr *encap; |
1427 | struct net_device *reg_dev = NULL; | 1427 | struct net_device *reg_dev = NULL; |
1428 | 1428 | ||
1429 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) | 1429 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) |
1430 | goto drop; | 1430 | goto drop; |
1431 | 1431 | ||
1432 | pim = (struct igmphdr*)skb->h.raw; | 1432 | pim = (struct igmphdr*)skb->h.raw; |
1433 | 1433 | ||
1434 | if (!mroute_do_pim || | 1434 | if (!mroute_do_pim || |
1435 | skb->len < sizeof(*pim) + sizeof(*encap) || | 1435 | skb->len < sizeof(*pim) + sizeof(*encap) || |
1436 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) | 1436 | pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) |
1437 | goto drop; | 1437 | goto drop; |
1438 | 1438 | ||
1439 | encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr)); | 1439 | encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr)); |
@@ -1445,7 +1445,7 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
1445 | */ | 1445 | */ |
1446 | if (!MULTICAST(encap->daddr) || | 1446 | if (!MULTICAST(encap->daddr) || |
1447 | encap->tot_len == 0 || | 1447 | encap->tot_len == 0 || |
1448 | ntohs(encap->tot_len) + sizeof(*pim) > skb->len) | 1448 | ntohs(encap->tot_len) + sizeof(*pim) > skb->len) |
1449 | goto drop; | 1449 | goto drop; |
1450 | 1450 | ||
1451 | read_lock(&mrt_lock); | 1451 | read_lock(&mrt_lock); |
@@ -1455,7 +1455,7 @@ int pim_rcv_v1(struct sk_buff * skb) | |||
1455 | dev_hold(reg_dev); | 1455 | dev_hold(reg_dev); |
1456 | read_unlock(&mrt_lock); | 1456 | read_unlock(&mrt_lock); |
1457 | 1457 | ||
1458 | if (reg_dev == NULL) | 1458 | if (reg_dev == NULL) |
1459 | goto drop; | 1459 | goto drop; |
1460 | 1460 | ||
1461 | skb->mac.raw = skb->nh.raw; | 1461 | skb->mac.raw = skb->nh.raw; |
@@ -1486,13 +1486,13 @@ static int pim_rcv(struct sk_buff * skb) | |||
1486 | struct iphdr *encap; | 1486 | struct iphdr *encap; |
1487 | struct net_device *reg_dev = NULL; | 1487 | struct net_device *reg_dev = NULL; |
1488 | 1488 | ||
1489 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) | 1489 | if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap))) |
1490 | goto drop; | 1490 | goto drop; |
1491 | 1491 | ||
1492 | pim = (struct pimreghdr*)skb->h.raw; | 1492 | pim = (struct pimreghdr*)skb->h.raw; |
1493 | if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || | 1493 | if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || |
1494 | (pim->flags&PIM_NULL_REGISTER) || | 1494 | (pim->flags&PIM_NULL_REGISTER) || |
1495 | (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && | 1495 | (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && |
1496 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) | 1496 | csum_fold(skb_checksum(skb, 0, skb->len, 0)))) |
1497 | goto drop; | 1497 | goto drop; |
1498 | 1498 | ||
@@ -1500,7 +1500,7 @@ static int pim_rcv(struct sk_buff * skb) | |||
1500 | encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr)); | 1500 | encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr)); |
1501 | if (!MULTICAST(encap->daddr) || | 1501 | if (!MULTICAST(encap->daddr) || |
1502 | encap->tot_len == 0 || | 1502 | encap->tot_len == 0 || |
1503 | ntohs(encap->tot_len) + sizeof(*pim) > skb->len) | 1503 | ntohs(encap->tot_len) + sizeof(*pim) > skb->len) |
1504 | goto drop; | 1504 | goto drop; |
1505 | 1505 | ||
1506 | read_lock(&mrt_lock); | 1506 | read_lock(&mrt_lock); |
@@ -1510,7 +1510,7 @@ static int pim_rcv(struct sk_buff * skb) | |||
1510 | dev_hold(reg_dev); | 1510 | dev_hold(reg_dev); |
1511 | read_unlock(&mrt_lock); | 1511 | read_unlock(&mrt_lock); |
1512 | 1512 | ||
1513 | if (reg_dev == NULL) | 1513 | if (reg_dev == NULL) |
1514 | goto drop; | 1514 | goto drop; |
1515 | 1515 | ||
1516 | skb->mac.raw = skb->nh.raw; | 1516 | skb->mac.raw = skb->nh.raw; |
@@ -1616,7 +1616,7 @@ int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) | |||
1616 | return err; | 1616 | return err; |
1617 | } | 1617 | } |
1618 | 1618 | ||
1619 | #ifdef CONFIG_PROC_FS | 1619 | #ifdef CONFIG_PROC_FS |
1620 | /* | 1620 | /* |
1621 | * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif | 1621 | * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif |
1622 | */ | 1622 | */ |
@@ -1630,7 +1630,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, | |||
1630 | for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) { | 1630 | for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) { |
1631 | if(!VIF_EXISTS(iter->ct)) | 1631 | if(!VIF_EXISTS(iter->ct)) |
1632 | continue; | 1632 | continue; |
1633 | if (pos-- == 0) | 1633 | if (pos-- == 0) |
1634 | return &vif_table[iter->ct]; | 1634 | return &vif_table[iter->ct]; |
1635 | } | 1635 | } |
1636 | return NULL; | 1636 | return NULL; |
@@ -1639,7 +1639,7 @@ static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter, | |||
1639 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) | 1639 | static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) |
1640 | { | 1640 | { |
1641 | read_lock(&mrt_lock); | 1641 | read_lock(&mrt_lock); |
1642 | return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1) | 1642 | return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1) |
1643 | : SEQ_START_TOKEN; | 1643 | : SEQ_START_TOKEN; |
1644 | } | 1644 | } |
1645 | 1645 | ||
@@ -1650,7 +1650,7 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1650 | ++*pos; | 1650 | ++*pos; |
1651 | if (v == SEQ_START_TOKEN) | 1651 | if (v == SEQ_START_TOKEN) |
1652 | return ipmr_vif_seq_idx(iter, 0); | 1652 | return ipmr_vif_seq_idx(iter, 0); |
1653 | 1653 | ||
1654 | while (++iter->ct < maxvif) { | 1654 | while (++iter->ct < maxvif) { |
1655 | if(!VIF_EXISTS(iter->ct)) | 1655 | if(!VIF_EXISTS(iter->ct)) |
1656 | continue; | 1656 | continue; |
@@ -1667,7 +1667,7 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) | |||
1667 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | 1667 | static int ipmr_vif_seq_show(struct seq_file *seq, void *v) |
1668 | { | 1668 | { |
1669 | if (v == SEQ_START_TOKEN) { | 1669 | if (v == SEQ_START_TOKEN) { |
1670 | seq_puts(seq, | 1670 | seq_puts(seq, |
1671 | "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); | 1671 | "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); |
1672 | } else { | 1672 | } else { |
1673 | const struct vif_device *vif = v; | 1673 | const struct vif_device *vif = v; |
@@ -1676,7 +1676,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v) | |||
1676 | seq_printf(seq, | 1676 | seq_printf(seq, |
1677 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", | 1677 | "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", |
1678 | vif - vif_table, | 1678 | vif - vif_table, |
1679 | name, vif->bytes_in, vif->pkt_in, | 1679 | name, vif->bytes_in, vif->pkt_in, |
1680 | vif->bytes_out, vif->pkt_out, | 1680 | vif->bytes_out, vif->pkt_out, |
1681 | vif->flags, vif->local, vif->remote); | 1681 | vif->flags, vif->local, vif->remote); |
1682 | } | 1682 | } |
@@ -1695,7 +1695,7 @@ static int ipmr_vif_open(struct inode *inode, struct file *file) | |||
1695 | struct seq_file *seq; | 1695 | struct seq_file *seq; |
1696 | int rc = -ENOMEM; | 1696 | int rc = -ENOMEM; |
1697 | struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); | 1697 | struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); |
1698 | 1698 | ||
1699 | if (!s) | 1699 | if (!s) |
1700 | goto out; | 1700 | goto out; |
1701 | 1701 | ||
@@ -1734,15 +1734,15 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos) | |||
1734 | 1734 | ||
1735 | it->cache = mfc_cache_array; | 1735 | it->cache = mfc_cache_array; |
1736 | read_lock(&mrt_lock); | 1736 | read_lock(&mrt_lock); |
1737 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) | 1737 | for (it->ct = 0; it->ct < MFC_LINES; it->ct++) |
1738 | for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next) | 1738 | for(mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next) |
1739 | if (pos-- == 0) | 1739 | if (pos-- == 0) |
1740 | return mfc; | 1740 | return mfc; |
1741 | read_unlock(&mrt_lock); | 1741 | read_unlock(&mrt_lock); |
1742 | 1742 | ||
1743 | it->cache = &mfc_unres_queue; | 1743 | it->cache = &mfc_unres_queue; |
1744 | spin_lock_bh(&mfc_unres_lock); | 1744 | spin_lock_bh(&mfc_unres_lock); |
1745 | for(mfc = mfc_unres_queue; mfc; mfc = mfc->next) | 1745 | for(mfc = mfc_unres_queue; mfc; mfc = mfc->next) |
1746 | if (pos-- == 0) | 1746 | if (pos-- == 0) |
1747 | return mfc; | 1747 | return mfc; |
1748 | spin_unlock_bh(&mfc_unres_lock); | 1748 | spin_unlock_bh(&mfc_unres_lock); |
@@ -1757,7 +1757,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) | |||
1757 | struct ipmr_mfc_iter *it = seq->private; | 1757 | struct ipmr_mfc_iter *it = seq->private; |
1758 | it->cache = NULL; | 1758 | it->cache = NULL; |
1759 | it->ct = 0; | 1759 | it->ct = 0; |
1760 | return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1) | 1760 | return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1) |
1761 | : SEQ_START_TOKEN; | 1761 | : SEQ_START_TOKEN; |
1762 | } | 1762 | } |
1763 | 1763 | ||
@@ -1773,8 +1773,8 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1773 | 1773 | ||
1774 | if (mfc->next) | 1774 | if (mfc->next) |
1775 | return mfc->next; | 1775 | return mfc->next; |
1776 | 1776 | ||
1777 | if (it->cache == &mfc_unres_queue) | 1777 | if (it->cache == &mfc_unres_queue) |
1778 | goto end_of_list; | 1778 | goto end_of_list; |
1779 | 1779 | ||
1780 | BUG_ON(it->cache != mfc_cache_array); | 1780 | BUG_ON(it->cache != mfc_cache_array); |
@@ -1789,10 +1789,10 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
1789 | read_unlock(&mrt_lock); | 1789 | read_unlock(&mrt_lock); |
1790 | it->cache = &mfc_unres_queue; | 1790 | it->cache = &mfc_unres_queue; |
1791 | it->ct = 0; | 1791 | it->ct = 0; |
1792 | 1792 | ||
1793 | spin_lock_bh(&mfc_unres_lock); | 1793 | spin_lock_bh(&mfc_unres_lock); |
1794 | mfc = mfc_unres_queue; | 1794 | mfc = mfc_unres_queue; |
1795 | if (mfc) | 1795 | if (mfc) |
1796 | return mfc; | 1796 | return mfc; |
1797 | 1797 | ||
1798 | end_of_list: | 1798 | end_of_list: |
@@ -1817,12 +1817,12 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
1817 | int n; | 1817 | int n; |
1818 | 1818 | ||
1819 | if (v == SEQ_START_TOKEN) { | 1819 | if (v == SEQ_START_TOKEN) { |
1820 | seq_puts(seq, | 1820 | seq_puts(seq, |
1821 | "Group Origin Iif Pkts Bytes Wrong Oifs\n"); | 1821 | "Group Origin Iif Pkts Bytes Wrong Oifs\n"); |
1822 | } else { | 1822 | } else { |
1823 | const struct mfc_cache *mfc = v; | 1823 | const struct mfc_cache *mfc = v; |
1824 | const struct ipmr_mfc_iter *it = seq->private; | 1824 | const struct ipmr_mfc_iter *it = seq->private; |
1825 | 1825 | ||
1826 | seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld", | 1826 | seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld", |
1827 | (unsigned long) mfc->mfc_mcastgrp, | 1827 | (unsigned long) mfc->mfc_mcastgrp, |
1828 | (unsigned long) mfc->mfc_origin, | 1828 | (unsigned long) mfc->mfc_origin, |
@@ -1832,12 +1832,12 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) | |||
1832 | mfc->mfc_un.res.wrong_if); | 1832 | mfc->mfc_un.res.wrong_if); |
1833 | 1833 | ||
1834 | if (it->cache != &mfc_unres_queue) { | 1834 | if (it->cache != &mfc_unres_queue) { |
1835 | for(n = mfc->mfc_un.res.minvif; | 1835 | for(n = mfc->mfc_un.res.minvif; |
1836 | n < mfc->mfc_un.res.maxvif; n++ ) { | 1836 | n < mfc->mfc_un.res.maxvif; n++ ) { |
1837 | if(VIF_EXISTS(n) | 1837 | if(VIF_EXISTS(n) |
1838 | && mfc->mfc_un.res.ttls[n] < 255) | 1838 | && mfc->mfc_un.res.ttls[n] < 255) |
1839 | seq_printf(seq, | 1839 | seq_printf(seq, |
1840 | " %2d:%-3d", | 1840 | " %2d:%-3d", |
1841 | n, mfc->mfc_un.res.ttls[n]); | 1841 | n, mfc->mfc_un.res.ttls[n]); |
1842 | } | 1842 | } |
1843 | } | 1843 | } |
@@ -1858,7 +1858,7 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file) | |||
1858 | struct seq_file *seq; | 1858 | struct seq_file *seq; |
1859 | int rc = -ENOMEM; | 1859 | int rc = -ENOMEM; |
1860 | struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); | 1860 | struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); |
1861 | 1861 | ||
1862 | if (!s) | 1862 | if (!s) |
1863 | goto out; | 1863 | goto out; |
1864 | 1864 | ||
@@ -1883,7 +1883,7 @@ static struct file_operations ipmr_mfc_fops = { | |||
1883 | .llseek = seq_lseek, | 1883 | .llseek = seq_lseek, |
1884 | .release = seq_release_private, | 1884 | .release = seq_release_private, |
1885 | }; | 1885 | }; |
1886 | #endif | 1886 | #endif |
1887 | 1887 | ||
1888 | #ifdef CONFIG_IP_PIMSM_V2 | 1888 | #ifdef CONFIG_IP_PIMSM_V2 |
1889 | static struct net_protocol pim_protocol = { | 1889 | static struct net_protocol pim_protocol = { |
@@ -1895,7 +1895,7 @@ static struct net_protocol pim_protocol = { | |||
1895 | /* | 1895 | /* |
1896 | * Setup for IP multicast routing | 1896 | * Setup for IP multicast routing |
1897 | */ | 1897 | */ |
1898 | 1898 | ||
1899 | void __init ip_mr_init(void) | 1899 | void __init ip_mr_init(void) |
1900 | { | 1900 | { |
1901 | mrt_cachep = kmem_cache_create("ip_mrt_cache", | 1901 | mrt_cachep = kmem_cache_create("ip_mrt_cache", |
@@ -1905,8 +1905,8 @@ void __init ip_mr_init(void) | |||
1905 | init_timer(&ipmr_expire_timer); | 1905 | init_timer(&ipmr_expire_timer); |
1906 | ipmr_expire_timer.function=ipmr_expire_process; | 1906 | ipmr_expire_timer.function=ipmr_expire_process; |
1907 | register_netdevice_notifier(&ip_mr_notifier); | 1907 | register_netdevice_notifier(&ip_mr_notifier); |
1908 | #ifdef CONFIG_PROC_FS | 1908 | #ifdef CONFIG_PROC_FS |
1909 | proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops); | 1909 | proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops); |
1910 | proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops); | 1910 | proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops); |
1911 | #endif | 1911 | #endif |
1912 | } | 1912 | } |
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 8086787a2c51..6feeb1f1c9cc 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -494,8 +494,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct) | |||
494 | * Checking the dest server status. | 494 | * Checking the dest server status. |
495 | */ | 495 | */ |
496 | if ((dest == NULL) || | 496 | if ((dest == NULL) || |
497 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || | 497 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || |
498 | (sysctl_ip_vs_expire_quiescent_template && | 498 | (sysctl_ip_vs_expire_quiescent_template && |
499 | (atomic_read(&dest->weight) == 0))) { | 499 | (atomic_read(&dest->weight) == 0))) { |
500 | IP_VS_DBG(9, "check_template: dest not available for " | 500 | IP_VS_DBG(9, "check_template: dest not available for " |
501 | "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " | 501 | "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " |
@@ -667,7 +667,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos) | |||
667 | { | 667 | { |
668 | int idx; | 668 | int idx; |
669 | struct ip_vs_conn *cp; | 669 | struct ip_vs_conn *cp; |
670 | 670 | ||
671 | for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { | 671 | for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { |
672 | ct_read_lock_bh(idx); | 672 | ct_read_lock_bh(idx); |
673 | list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { | 673 | list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
@@ -695,7 +695,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
695 | int idx; | 695 | int idx; |
696 | 696 | ||
697 | ++*pos; | 697 | ++*pos; |
698 | if (v == SEQ_START_TOKEN) | 698 | if (v == SEQ_START_TOKEN) |
699 | return ip_vs_conn_array(seq, 0); | 699 | return ip_vs_conn_array(seq, 0); |
700 | 700 | ||
701 | /* more on same hash chain? */ | 701 | /* more on same hash chain? */ |
@@ -710,7 +710,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
710 | list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { | 710 | list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { |
711 | seq->private = &ip_vs_conn_tab[idx]; | 711 | seq->private = &ip_vs_conn_tab[idx]; |
712 | return cp; | 712 | return cp; |
713 | } | 713 | } |
714 | ct_read_unlock_bh(idx); | 714 | ct_read_unlock_bh(idx); |
715 | } | 715 | } |
716 | seq->private = NULL; | 716 | seq->private = NULL; |
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 34257520a3a6..24d7b66eb6d2 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -813,14 +813,14 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
813 | skb->nh.iph->saddr = cp->vaddr; | 813 | skb->nh.iph->saddr = cp->vaddr; |
814 | ip_send_check(skb->nh.iph); | 814 | ip_send_check(skb->nh.iph); |
815 | 815 | ||
816 | /* For policy routing, packets originating from this | 816 | /* For policy routing, packets originating from this |
817 | * machine itself may be routed differently to packets | 817 | * machine itself may be routed differently to packets |
818 | * passing through. We want this packet to be routed as | 818 | * passing through. We want this packet to be routed as |
819 | * if it came from this machine itself. So re-compute | 819 | * if it came from this machine itself. So re-compute |
820 | * the routing information. | 820 | * the routing information. |
821 | */ | 821 | */ |
822 | if (ip_route_me_harder(pskb, RTN_LOCAL) != 0) | 822 | if (ip_route_me_harder(pskb, RTN_LOCAL) != 0) |
823 | goto drop; | 823 | goto drop; |
824 | skb = *pskb; | 824 | skb = *pskb; |
825 | 825 | ||
826 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); | 826 | IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); |
@@ -847,7 +847,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb, | |||
847 | * forward to the right destination host if relevant. | 847 | * forward to the right destination host if relevant. |
848 | * Currently handles error types - unreachable, quench, ttl exceeded. | 848 | * Currently handles error types - unreachable, quench, ttl exceeded. |
849 | */ | 849 | */ |
850 | static int | 850 | static int |
851 | ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) | 851 | ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) |
852 | { | 852 | { |
853 | struct sk_buff *skb = *pskb; | 853 | struct sk_buff *skb = *pskb; |
@@ -863,7 +863,7 @@ ip_vs_in_icmp(struct sk_buff **pskb, int *related, unsigned int hooknum) | |||
863 | /* reassemble IP fragments */ | 863 | /* reassemble IP fragments */ |
864 | if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) { | 864 | if (skb->nh.iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) { |
865 | skb = ip_vs_gather_frags(skb, | 865 | skb = ip_vs_gather_frags(skb, |
866 | hooknum == NF_IP_LOCAL_IN ? | 866 | hooknum == NF_IP_LOCAL_IN ? |
867 | IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD); | 867 | IP_DEFRAG_VS_IN : IP_DEFRAG_VS_FWD); |
868 | if (!skb) | 868 | if (!skb) |
869 | return NF_STOLEN; | 869 | return NF_STOLEN; |
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index 687c1de1146f..847c47af040c 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -370,7 +370,7 @@ static int __init ip_vs_ftp_init(void) | |||
370 | if (ret) | 370 | if (ret) |
371 | break; | 371 | break; |
372 | IP_VS_INFO("%s: loaded support on port[%d] = %d\n", | 372 | IP_VS_INFO("%s: loaded support on port[%d] = %d\n", |
373 | app->name, i, ports[i]); | 373 | app->name, i, ports[i]); |
374 | } | 374 | } |
375 | 375 | ||
376 | if (ret) | 376 | if (ret) |
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index a4385a2180ee..76fd1fb91878 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -118,7 +118,7 @@ static ctl_table vs_vars_table[] = { | |||
118 | .procname = "lblc_expiration", | 118 | .procname = "lblc_expiration", |
119 | .data = &sysctl_ip_vs_lblc_expiration, | 119 | .data = &sysctl_ip_vs_lblc_expiration, |
120 | .maxlen = sizeof(int), | 120 | .maxlen = sizeof(int), |
121 | .mode = 0644, | 121 | .mode = 0644, |
122 | .proc_handler = &proc_dointvec_jiffies, | 122 | .proc_handler = &proc_dointvec_jiffies, |
123 | }, | 123 | }, |
124 | { .ctl_name = 0 } | 124 | { .ctl_name = 0 } |
@@ -128,7 +128,7 @@ static ctl_table vs_table[] = { | |||
128 | { | 128 | { |
129 | .ctl_name = NET_IPV4_VS, | 129 | .ctl_name = NET_IPV4_VS, |
130 | .procname = "vs", | 130 | .procname = "vs", |
131 | .mode = 0555, | 131 | .mode = 0555, |
132 | .child = vs_vars_table | 132 | .child = vs_vars_table |
133 | }, | 133 | }, |
134 | { .ctl_name = 0 } | 134 | { .ctl_name = 0 } |
@@ -137,7 +137,7 @@ static ctl_table vs_table[] = { | |||
137 | static ctl_table ipvs_ipv4_table[] = { | 137 | static ctl_table ipvs_ipv4_table[] = { |
138 | { | 138 | { |
139 | .ctl_name = NET_IPV4, | 139 | .ctl_name = NET_IPV4, |
140 | .procname = "ipv4", | 140 | .procname = "ipv4", |
141 | .mode = 0555, | 141 | .mode = 0555, |
142 | .child = vs_table | 142 | .child = vs_table |
143 | }, | 143 | }, |
@@ -147,8 +147,8 @@ static ctl_table ipvs_ipv4_table[] = { | |||
147 | static ctl_table lblc_root_table[] = { | 147 | static ctl_table lblc_root_table[] = { |
148 | { | 148 | { |
149 | .ctl_name = CTL_NET, | 149 | .ctl_name = CTL_NET, |
150 | .procname = "net", | 150 | .procname = "net", |
151 | .mode = 0555, | 151 | .mode = 0555, |
152 | .child = ipvs_ipv4_table | 152 | .child = ipvs_ipv4_table |
153 | }, | 153 | }, |
154 | { .ctl_name = 0 } | 154 | { .ctl_name = 0 } |
@@ -288,7 +288,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) | |||
288 | 288 | ||
289 | write_lock(&tbl->lock); | 289 | write_lock(&tbl->lock); |
290 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { | 290 | list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { |
291 | if (time_before(now, | 291 | if (time_before(now, |
292 | en->lastuse + sysctl_ip_vs_lblc_expiration)) | 292 | en->lastuse + sysctl_ip_vs_lblc_expiration)) |
293 | continue; | 293 | continue; |
294 | 294 | ||
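The nested ctl_table chain in this file (net -> ipv4 -> vs -> lblc_expiration) surfaces as a single procfs knob, and the expiry check above compares lastuse against it directly. A small userspace sketch that reads the assumed path; proc_dointvec_jiffies exposes the jiffies value in seconds:

#include <stdio.h>

/* Read the LBLC expiration setting; path assumed from the table layout. */
int read_lblc_expiration(void)
{
        FILE *f = fopen("/proc/sys/net/ipv4/vs/lblc_expiration", "r");
        int secs = -1;

        if (!f)
                return -1;
        if (fscanf(f, "%d", &secs) != 1)
                secs = -1;
        fclose(f);
        return secs;
}
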
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index fe1af5d079af..bf1e7f272b84 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -307,7 +307,7 @@ static ctl_table vs_vars_table[] = { | |||
307 | .procname = "lblcr_expiration", | 307 | .procname = "lblcr_expiration", |
308 | .data = &sysctl_ip_vs_lblcr_expiration, | 308 | .data = &sysctl_ip_vs_lblcr_expiration, |
309 | .maxlen = sizeof(int), | 309 | .maxlen = sizeof(int), |
310 | .mode = 0644, | 310 | .mode = 0644, |
311 | .proc_handler = &proc_dointvec_jiffies, | 311 | .proc_handler = &proc_dointvec_jiffies, |
312 | }, | 312 | }, |
313 | { .ctl_name = 0 } | 313 | { .ctl_name = 0 } |
@@ -326,7 +326,7 @@ static ctl_table vs_table[] = { | |||
326 | static ctl_table ipvs_ipv4_table[] = { | 326 | static ctl_table ipvs_ipv4_table[] = { |
327 | { | 327 | { |
328 | .ctl_name = NET_IPV4, | 328 | .ctl_name = NET_IPV4, |
329 | .procname = "ipv4", | 329 | .procname = "ipv4", |
330 | .mode = 0555, | 330 | .mode = 0555, |
331 | .child = vs_table | 331 | .child = vs_table |
332 | }, | 332 | }, |
@@ -336,8 +336,8 @@ static ctl_table ipvs_ipv4_table[] = { | |||
336 | static ctl_table lblcr_root_table[] = { | 336 | static ctl_table lblcr_root_table[] = { |
337 | { | 337 | { |
338 | .ctl_name = CTL_NET, | 338 | .ctl_name = CTL_NET, |
339 | .procname = "net", | 339 | .procname = "net", |
340 | .mode = 0555, | 340 | .mode = 0555, |
341 | .child = ipvs_ipv4_table | 341 | .child = ipvs_ipv4_table |
342 | }, | 342 | }, |
343 | { .ctl_name = 0 } | 343 | { .ctl_name = 0 } |
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c index b23bab231cab..433f8a947924 100644 --- a/net/ipv4/ipvs/ip_vs_rr.c +++ b/net/ipv4/ipvs/ip_vs_rr.c | |||
@@ -68,7 +68,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) | |||
68 | q = q->next; | 68 | q = q->next; |
69 | continue; | 69 | continue; |
70 | } | 70 | } |
71 | 71 | ||
72 | dest = list_entry(q, struct ip_vs_dest, n_list); | 72 | dest = list_entry(q, struct ip_vs_dest, n_list); |
73 | if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && | 73 | if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && |
74 | atomic_read(&dest->weight) > 0) | 74 | atomic_read(&dest->weight) > 0) |
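
The ip_vs_rr chunk above is the core of the round-robin scheduler: it resumes from the position saved on the previous call and skips destinations that are flagged overloaded or carry weight 0. A compact userspace sketch of the same selection rule over an array (indices instead of IPVS's circular list; the names are illustrative, not the kernel's):

#include <stddef.h>

struct dest {
    int weight;       /* 0 means "do not schedule" */
    int overloaded;   /* mirrors the IP_VS_DEST_F_OVERLOAD flag */
};

/* Pick the next usable destination after *cursor, wrapping at most once.
 * Returns the chosen index, or -1 if every destination is unusable. */
static int rr_pick(const struct dest *tbl, size_t n, size_t *cursor)
{
    size_t tried;

    for (tried = 0; tried < n; tried++) {
        size_t i = (*cursor + 1 + tried) % n;

        if (!tbl[i].overloaded && tbl[i].weight > 0) {
            *cursor = i;          /* remember where we stopped */
            return (int)i;
        }
    }
    return -1;                    /* nothing schedulable */
}
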
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c index 252e837b17a5..af691e287c03 100644 --- a/net/ipv4/multipath_drr.c +++ b/net/ipv4/multipath_drr.c | |||
@@ -134,7 +134,7 @@ static void drr_select_route(const struct flowi *flp, | |||
134 | struct rtable *first, struct rtable **rp) | 134 | struct rtable *first, struct rtable **rp) |
135 | { | 135 | { |
136 | struct rtable *nh, *result, *cur_min; | 136 | struct rtable *nh, *result, *cur_min; |
137 | int min_usecount = -1; | 137 | int min_usecount = -1; |
138 | int devidx = -1; | 138 | int devidx = -1; |
139 | int cur_min_devidx = -1; | 139 | int cur_min_devidx = -1; |
140 | 140 | ||
@@ -161,7 +161,7 @@ static void drr_select_route(const struct flowi *flp, | |||
161 | */ | 161 | */ |
162 | devidx = __multipath_finddev(nh_ifidx); | 162 | devidx = __multipath_finddev(nh_ifidx); |
163 | if (devidx == -1) { | 163 | if (devidx == -1) { |
164 | /* add the interface to the array | 164 | /* add the interface to the array |
165 | * SMP safe | 165 | * SMP safe |
166 | */ | 166 | */ |
167 | spin_lock_bh(&state_lock); | 167 | spin_lock_bh(&state_lock); |
diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c index bba5abe5542d..ed0aefa26f8b 100644 --- a/net/ipv4/multipath_rr.c +++ b/net/ipv4/multipath_rr.c | |||
@@ -58,7 +58,7 @@ static void rr_select_route(const struct flowi *flp, | |||
58 | */ | 58 | */ |
59 | result = NULL; | 59 | result = NULL; |
60 | for (nh = rcu_dereference(first); nh; | 60 | for (nh = rcu_dereference(first); nh; |
61 | nh = rcu_dereference(nh->u.rt_next)) { | 61 | nh = rcu_dereference(nh->u.rt_next)) { |
62 | if ((nh->u.dst.flags & DST_BALANCED) != 0 && | 62 | if ((nh->u.dst.flags & DST_BALANCED) != 0 && |
63 | multipath_comparekeys(&nh->fl, flp)) { | 63 | multipath_comparekeys(&nh->fl, flp)) { |
64 | nh->u.dst.lastuse = jiffies; | 64 | nh->u.dst.lastuse = jiffies; |
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c index 92b04823e034..ef42e5fa647b 100644 --- a/net/ipv4/multipath_wrandom.c +++ b/net/ipv4/multipath_wrandom.c | |||
@@ -142,7 +142,7 @@ out: | |||
142 | return weight; | 142 | return weight; |
143 | } | 143 | } |
144 | 144 | ||
145 | static void wrandom_init_state(void) | 145 | static void wrandom_init_state(void) |
146 | { | 146 | { |
147 | int i; | 147 | int i; |
148 | 148 | ||
@@ -287,7 +287,7 @@ static void __multipath_free(struct rcu_head *head) | |||
287 | 287 | ||
288 | static void __multipath_free_dst(struct rcu_head *head) | 288 | static void __multipath_free_dst(struct rcu_head *head) |
289 | { | 289 | { |
290 | struct multipath_dest *dst = container_of(head, | 290 | struct multipath_dest *dst = container_of(head, |
291 | struct multipath_dest, | 291 | struct multipath_dest, |
292 | rcu); | 292 | rcu); |
293 | kfree(dst); | 293 | kfree(dst); |
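
The multipath_wrandom hunk above frees a multipath_dest from an RCU callback by stepping back from the embedded rcu_head to the containing structure with container_of(). Outside the kernel the same pointer arithmetic is just offsetof(); a minimal illustration where the struct names are made up and only the pattern matches:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct callback_head { void (*func)(struct callback_head *); };

struct wdest {
    int weight;
    struct callback_head rcu;   /* embedded, like rcu_head in the kernel */
};

static void wdest_free(struct callback_head *head)
{
    /* Recover the enclosing object from the embedded member. */
    struct wdest *d = container_of(head, struct wdest, rcu);
    printf("freeing dest with weight %d\n", d->weight);
}

int main(void)
{
    struct wdest d = { .weight = 3 };
    wdest_free(&d.rcu);          /* the RCU core would invoke this later */
    return 0;
}

In the kernel the callback only runs after a grace period, so no reader can still hold a reference to the object being freed.
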
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index c47ce7076bd5..6069a11514f6 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -53,7 +53,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
53 | dst_release(&rt->u.dst); | 53 | dst_release(&rt->u.dst); |
54 | dst_release(odst); | 54 | dst_release(odst); |
55 | } | 55 | } |
56 | 56 | ||
57 | if ((*pskb)->dst->error) | 57 | if ((*pskb)->dst->error) |
58 | return -1; | 58 | return -1; |
59 | 59 | ||
@@ -70,7 +70,7 @@ int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type) | |||
70 | struct sk_buff *nskb; | 70 | struct sk_buff *nskb; |
71 | 71 | ||
72 | nskb = skb_realloc_headroom(*pskb, hh_len); | 72 | nskb = skb_realloc_headroom(*pskb, hh_len); |
73 | if (!nskb) | 73 | if (!nskb) |
74 | return -1; | 74 | return -1; |
75 | if ((*pskb)->sk) | 75 | if ((*pskb)->sk) |
76 | skb_set_owner_w(nskb, (*pskb)->sk); | 76 | skb_set_owner_w(nskb, (*pskb)->sk); |
@@ -177,7 +177,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | |||
177 | break; | 177 | break; |
178 | if ((protocol == 0 && !csum_fold(skb->csum)) || | 178 | if ((protocol == 0 && !csum_fold(skb->csum)) || |
179 | !csum_tcpudp_magic(iph->saddr, iph->daddr, | 179 | !csum_tcpudp_magic(iph->saddr, iph->daddr, |
180 | skb->len - dataoff, protocol, | 180 | skb->len - dataoff, protocol, |
181 | skb->csum)) { | 181 | skb->csum)) { |
182 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 182 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
183 | break; | 183 | break; |
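
The nf_ip_checksum() hunk above verifies a TCP/UDP checksum by folding the accumulated 32-bit sum (csum_fold) and mixing in the pseudo-header (csum_tcpudp_magic). The underlying arithmetic is the ordinary RFC 1071 one's-complement sum; here is a standalone sketch of it, without the kernel's incremental skb->csum machinery:

#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit accumulator into 16 bits, one's-complement style. */
static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

/* Sum 'len' bytes as big-endian 16-bit words; an odd trailing byte is
 * padded with zero. 'sum' lets a pseudo-header sum be carried in. */
static uint32_t csum_partial(const uint8_t *data, size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += ((uint32_t)data[i] << 8) | data[i + 1];
    if (len & 1)
        sum += (uint32_t)data[len - 1] << 8;
    return sum;
}

/* A segment that already contains its transmitted checksum field verifies
 * when the folded total is 0 -- the condition the "!csum_tcpudp_magic(...)"
 * test in the hunk expresses. */
static int checksum_ok(const uint8_t *segment, size_t len, uint32_t pseudo_sum)
{
    return csum_fold(csum_partial(segment, len, pseudo_sum)) == 0;
}
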
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 9aa22398b3dc..5170f5c75f9d 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -544,7 +544,7 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
544 | } | 544 | } |
545 | 545 | ||
546 | /* FIXME: underflows must be unconditional, standard verdicts | 546 | /* FIXME: underflows must be unconditional, standard verdicts |
547 | < 0 (not ARPT_RETURN). --RR */ | 547 | < 0 (not ARPT_RETURN). --RR */ |
548 | 548 | ||
549 | /* Clear counters and comefrom */ | 549 | /* Clear counters and comefrom */ |
550 | e->counters = ((struct xt_counters) { 0, 0 }); | 550 | e->counters = ((struct xt_counters) { 0, 0 }); |
@@ -869,8 +869,8 @@ static int do_replace(void __user *user, unsigned int len) | |||
869 | /* Update module usage count based on number of rules */ | 869 | /* Update module usage count based on number of rules */ |
870 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", | 870 | duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", |
871 | oldinfo->number, oldinfo->initial_entries, newinfo->number); | 871 | oldinfo->number, oldinfo->initial_entries, newinfo->number); |
872 | if ((oldinfo->number > oldinfo->initial_entries) || | 872 | if ((oldinfo->number > oldinfo->initial_entries) || |
873 | (newinfo->number <= oldinfo->initial_entries)) | 873 | (newinfo->number <= oldinfo->initial_entries)) |
874 | module_put(t->me); | 874 | module_put(t->me); |
875 | if ((oldinfo->number > oldinfo->initial_entries) && | 875 | if ((oldinfo->number > oldinfo->initial_entries) && |
876 | (newinfo->number <= oldinfo->initial_entries)) | 876 | (newinfo->number <= oldinfo->initial_entries)) |
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index d12b1df252a1..709db4d3f48f 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c | |||
@@ -67,7 +67,7 @@ target(struct sk_buff **pskb, | |||
67 | 67 | ||
68 | static int | 68 | static int |
69 | checkentry(const char *tablename, const void *e, const struct xt_target *target, | 69 | checkentry(const char *tablename, const void *e, const struct xt_target *target, |
70 | void *targinfo, unsigned int hook_mask) | 70 | void *targinfo, unsigned int hook_mask) |
71 | { | 71 | { |
72 | const struct arpt_mangle *mangle = targinfo; | 72 | const struct arpt_mangle *mangle = targinfo; |
73 | 73 | ||
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c index ad246ba7790b..4f561f52c83a 100644 --- a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ b/net/ipv4/netfilter/ip_conntrack_amanda.c | |||
@@ -9,7 +9,7 @@ | |||
9 | * | 9 | * |
10 | * Module load syntax: | 10 | * Module load syntax: |
11 | * insmod ip_conntrack_amanda.o [master_timeout=n] | 11 | * insmod ip_conntrack_amanda.o [master_timeout=n] |
12 | * | 12 | * |
13 | * Where master_timeout is the timeout (in seconds) of the master | 13 | * Where master_timeout is the timeout (in seconds) of the master |
14 | * connection (port 10080). This defaults to 5 minutes but if | 14 | * connection (port 10080). This defaults to 5 minutes but if |
15 | * your clients take longer than 5 minutes to do their work | 15 | * your clients take longer than 5 minutes to do their work |
@@ -84,7 +84,7 @@ static struct { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | static int help(struct sk_buff **pskb, | 86 | static int help(struct sk_buff **pskb, |
87 | struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) | 87 | struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) |
88 | { | 88 | { |
89 | struct ts_state ts; | 89 | struct ts_state ts; |
90 | struct ip_conntrack_expect *exp; | 90 | struct ip_conntrack_expect *exp; |
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index 8556a4f4f60a..2e6e42199f21 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c | |||
@@ -2,7 +2,7 @@ | |||
2 | but required by, the NAT layer; it can also be used by an iptables | 2 | but required by, the NAT layer; it can also be used by an iptables |
3 | extension. */ | 3 | extension. */ |
4 | 4 | ||
5 | /* (C) 1999-2001 Paul `Rusty' Russell | 5 | /* (C) 1999-2001 Paul `Rusty' Russell |
6 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 6 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
@@ -99,7 +99,7 @@ __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache) | |||
99 | void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) | 99 | void ip_ct_deliver_cached_events(const struct ip_conntrack *ct) |
100 | { | 100 | { |
101 | struct ip_conntrack_ecache *ecache; | 101 | struct ip_conntrack_ecache *ecache; |
102 | 102 | ||
103 | local_bh_disable(); | 103 | local_bh_disable(); |
104 | ecache = &__get_cpu_var(ip_conntrack_ecache); | 104 | ecache = &__get_cpu_var(ip_conntrack_ecache); |
105 | if (ecache->ct == ct) | 105 | if (ecache->ct == ct) |
@@ -147,9 +147,9 @@ static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple, | |||
147 | unsigned int size, unsigned int rnd) | 147 | unsigned int size, unsigned int rnd) |
148 | { | 148 | { |
149 | return (jhash_3words((__force u32)tuple->src.ip, | 149 | return (jhash_3words((__force u32)tuple->src.ip, |
150 | ((__force u32)tuple->dst.ip ^ tuple->dst.protonum), | 150 | ((__force u32)tuple->dst.ip ^ tuple->dst.protonum), |
151 | (tuple->src.u.all | (tuple->dst.u.all << 16)), | 151 | (tuple->src.u.all | (tuple->dst.u.all << 16)), |
152 | rnd) % size); | 152 | rnd) % size); |
153 | } | 153 | } |
154 | 154 | ||
155 | static u_int32_t | 155 | static u_int32_t |
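
__hash_conntrack() above packs the connection tuple into three 32-bit words — source address; destination address XOR protocol number; both ports — runs them through jhash_3words() with a random seed, and reduces the result modulo the table size. A userspace sketch of the same packing, with a simple stand-in mixer (the real code uses the Jenkins jhash; the mixer below is only illustrative):

#include <stdint.h>

struct tuple {                    /* trimmed-down stand-in for the kernel tuple */
    uint32_t src_ip, dst_ip;
    uint16_t src_port, dst_port;
    uint8_t  protonum;
};

/* Illustrative 3-word mixer; the kernel uses jhash_3words() here. */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
    uint32_t h = seed;
    h = (h ^ a) * 0x9e3779b1u;
    h = (h ^ b) * 0x85ebca6bu;
    h = (h ^ c) * 0xc2b2ae35u;
    h ^= h >> 16;
    return h;
}

static uint32_t hash_tuple(const struct tuple *t, uint32_t size, uint32_t rnd)
{
    uint32_t a = t->src_ip;
    uint32_t b = t->dst_ip ^ t->protonum;                  /* fold proto into dst */
    uint32_t c = (uint32_t)t->src_port | ((uint32_t)t->dst_port << 16);

    return mix3(a, b, c, rnd) % size;                      /* bucket index */
}

The random seed (fetched with get_random_bytes(), as the rehash hunk further down shows) keeps bucket placement unpredictable, so traffic cannot be crafted to pile connections into a single chain.
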
@@ -219,7 +219,7 @@ struct ip_conntrack_expect * | |||
219 | __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) | 219 | __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple) |
220 | { | 220 | { |
221 | struct ip_conntrack_expect *i; | 221 | struct ip_conntrack_expect *i; |
222 | 222 | ||
223 | list_for_each_entry(i, &ip_conntrack_expect_list, list) { | 223 | list_for_each_entry(i, &ip_conntrack_expect_list, list) { |
224 | if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) | 224 | if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) |
225 | return i; | 225 | return i; |
@@ -232,7 +232,7 @@ struct ip_conntrack_expect * | |||
232 | ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple) | 232 | ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple) |
233 | { | 233 | { |
234 | struct ip_conntrack_expect *i; | 234 | struct ip_conntrack_expect *i; |
235 | 235 | ||
236 | read_lock_bh(&ip_conntrack_lock); | 236 | read_lock_bh(&ip_conntrack_lock); |
237 | i = __ip_conntrack_expect_find(tuple); | 237 | i = __ip_conntrack_expect_find(tuple); |
238 | if (i) | 238 | if (i) |
@@ -398,7 +398,7 @@ ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple, | |||
398 | 398 | ||
399 | static void __ip_conntrack_hash_insert(struct ip_conntrack *ct, | 399 | static void __ip_conntrack_hash_insert(struct ip_conntrack *ct, |
400 | unsigned int hash, | 400 | unsigned int hash, |
401 | unsigned int repl_hash) | 401 | unsigned int repl_hash) |
402 | { | 402 | { |
403 | ct->id = ++ip_conntrack_next_id; | 403 | ct->id = ++ip_conntrack_next_id; |
404 | list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list, | 404 | list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list, |
@@ -446,15 +446,15 @@ __ip_conntrack_confirm(struct sk_buff **pskb) | |||
446 | /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ | 446 | /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */ |
447 | 447 | ||
448 | /* No external references means noone else could have | 448 | /* No external references means noone else could have |
449 | confirmed us. */ | 449 | confirmed us. */ |
450 | IP_NF_ASSERT(!is_confirmed(ct)); | 450 | IP_NF_ASSERT(!is_confirmed(ct)); |
451 | DEBUGP("Confirming conntrack %p\n", ct); | 451 | DEBUGP("Confirming conntrack %p\n", ct); |
452 | 452 | ||
453 | write_lock_bh(&ip_conntrack_lock); | 453 | write_lock_bh(&ip_conntrack_lock); |
454 | 454 | ||
455 | /* See if there's one in the list already, including reverse: | 455 | /* See if there's one in the list already, including reverse: |
456 | NAT could have grabbed it without realizing, since we're | 456 | NAT could have grabbed it without realizing, since we're |
457 | not in the hash. If there is, we lost race. */ | 457 | not in the hash. If there is, we lost race. */ |
458 | list_for_each_entry(h, &ip_conntrack_hash[hash], list) | 458 | list_for_each_entry(h, &ip_conntrack_hash[hash], list) |
459 | if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 459 | if (ip_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
460 | &h->tuple)) | 460 | &h->tuple)) |
@@ -602,7 +602,7 @@ ip_conntrack_proto_find_get(u_int8_t protocol) | |||
602 | p = &ip_conntrack_generic_protocol; | 602 | p = &ip_conntrack_generic_protocol; |
603 | } | 603 | } |
604 | preempt_enable(); | 604 | preempt_enable(); |
605 | 605 | ||
606 | return p; | 606 | return p; |
607 | } | 607 | } |
608 | 608 | ||
@@ -746,7 +746,7 @@ resolve_normal_ct(struct sk_buff *skb, | |||
746 | 746 | ||
747 | IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0); | 747 | IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0); |
748 | 748 | ||
749 | if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4, | 749 | if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4, |
750 | &tuple,proto)) | 750 | &tuple,proto)) |
751 | return NULL; | 751 | return NULL; |
752 | 752 | ||
@@ -771,7 +771,7 @@ resolve_normal_ct(struct sk_buff *skb, | |||
771 | if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { | 771 | if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { |
772 | DEBUGP("ip_conntrack_in: normal packet for %p\n", | 772 | DEBUGP("ip_conntrack_in: normal packet for %p\n", |
773 | ct); | 773 | ct); |
774 | *ctinfo = IP_CT_ESTABLISHED; | 774 | *ctinfo = IP_CT_ESTABLISHED; |
775 | } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { | 775 | } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) { |
776 | DEBUGP("ip_conntrack_in: related packet for %p\n", | 776 | DEBUGP("ip_conntrack_in: related packet for %p\n", |
777 | ct); | 777 | ct); |
@@ -822,7 +822,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum, | |||
822 | if ((*pskb)->pkt_type == PACKET_BROADCAST) { | 822 | if ((*pskb)->pkt_type == PACKET_BROADCAST) { |
823 | printk("Broadcast packet!\n"); | 823 | printk("Broadcast packet!\n"); |
824 | return NF_ACCEPT; | 824 | return NF_ACCEPT; |
825 | } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF)) | 825 | } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF)) |
826 | == htonl(0x000000FF)) { | 826 | == htonl(0x000000FF)) { |
827 | printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n", | 827 | printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n", |
828 | NIPQUAD((*pskb)->nh.iph->saddr), | 828 | NIPQUAD((*pskb)->nh.iph->saddr), |
@@ -836,7 +836,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum, | |||
836 | /* It may be an special packet, error, unclean... | 836 | /* It may be an special packet, error, unclean... |
837 | * inverse of the return code tells to the netfilter | 837 | * inverse of the return code tells to the netfilter |
838 | * core what to do with the packet. */ | 838 | * core what to do with the packet. */ |
839 | if (proto->error != NULL | 839 | if (proto->error != NULL |
840 | && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) { | 840 | && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) { |
841 | CONNTRACK_STAT_INC(error); | 841 | CONNTRACK_STAT_INC(error); |
842 | CONNTRACK_STAT_INC(invalid); | 842 | CONNTRACK_STAT_INC(invalid); |
@@ -876,7 +876,7 @@ unsigned int ip_conntrack_in(unsigned int hooknum, | |||
876 | int invert_tuplepr(struct ip_conntrack_tuple *inverse, | 876 | int invert_tuplepr(struct ip_conntrack_tuple *inverse, |
877 | const struct ip_conntrack_tuple *orig) | 877 | const struct ip_conntrack_tuple *orig) |
878 | { | 878 | { |
879 | return ip_ct_invert_tuple(inverse, orig, | 879 | return ip_ct_invert_tuple(inverse, orig, |
880 | __ip_conntrack_proto_find(orig->dst.protonum)); | 880 | __ip_conntrack_proto_find(orig->dst.protonum)); |
881 | } | 881 | } |
882 | 882 | ||
@@ -885,7 +885,7 @@ static inline int expect_clash(const struct ip_conntrack_expect *a, | |||
885 | const struct ip_conntrack_expect *b) | 885 | const struct ip_conntrack_expect *b) |
886 | { | 886 | { |
887 | /* Part covered by intersection of masks must be unequal, | 887 | /* Part covered by intersection of masks must be unequal, |
888 | otherwise they clash */ | 888 | otherwise they clash */ |
889 | struct ip_conntrack_tuple intersect_mask | 889 | struct ip_conntrack_tuple intersect_mask |
890 | = { { a->mask.src.ip & b->mask.src.ip, | 890 | = { { a->mask.src.ip & b->mask.src.ip, |
891 | { a->mask.src.u.all & b->mask.src.u.all } }, | 891 | { a->mask.src.u.all & b->mask.src.u.all } }, |
@@ -923,7 +923,7 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp) | |||
923 | } | 923 | } |
924 | 924 | ||
925 | /* We don't increase the master conntrack refcount for non-fulfilled | 925 | /* We don't increase the master conntrack refcount for non-fulfilled |
926 | * conntracks. During the conntrack destruction, the expectations are | 926 | * conntracks. During the conntrack destruction, the expectations are |
927 | * always killed before the conntrack itself */ | 927 | * always killed before the conntrack itself */ |
928 | struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me) | 928 | struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me) |
929 | { | 929 | { |
@@ -1012,7 +1012,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect) | |||
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | /* Will be over limit? */ | 1014 | /* Will be over limit? */ |
1015 | if (expect->master->helper->max_expected && | 1015 | if (expect->master->helper->max_expected && |
1016 | expect->master->expecting >= expect->master->helper->max_expected) | 1016 | expect->master->expecting >= expect->master->helper->max_expected) |
1017 | evict_oldest_expect(expect->master); | 1017 | evict_oldest_expect(expect->master); |
1018 | 1018 | ||
@@ -1021,7 +1021,7 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect) | |||
1021 | ret = 0; | 1021 | ret = 0; |
1022 | out: | 1022 | out: |
1023 | write_unlock_bh(&ip_conntrack_lock); | 1023 | write_unlock_bh(&ip_conntrack_lock); |
1024 | return ret; | 1024 | return ret; |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | /* Alter reply tuple (maybe alter helper). This is for NAT, and is | 1027 | /* Alter reply tuple (maybe alter helper). This is for NAT, and is |
@@ -1069,7 +1069,7 @@ static inline void unhelp(struct ip_conntrack_tuple_hash *i, | |||
1069 | const struct ip_conntrack_helper *me) | 1069 | const struct ip_conntrack_helper *me) |
1070 | { | 1070 | { |
1071 | if (tuplehash_to_ctrack(i)->helper == me) { | 1071 | if (tuplehash_to_ctrack(i)->helper == me) { |
1072 | ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i)); | 1072 | ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i)); |
1073 | tuplehash_to_ctrack(i)->helper = NULL; | 1073 | tuplehash_to_ctrack(i)->helper = NULL; |
1074 | } | 1074 | } |
1075 | } | 1075 | } |
@@ -1105,8 +1105,8 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) | |||
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ | 1107 | /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ |
1108 | void __ip_ct_refresh_acct(struct ip_conntrack *ct, | 1108 | void __ip_ct_refresh_acct(struct ip_conntrack *ct, |
1109 | enum ip_conntrack_info ctinfo, | 1109 | enum ip_conntrack_info ctinfo, |
1110 | const struct sk_buff *skb, | 1110 | const struct sk_buff *skb, |
1111 | unsigned long extra_jiffies, | 1111 | unsigned long extra_jiffies, |
1112 | int do_acct) | 1112 | int do_acct) |
@@ -1140,7 +1140,7 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct, | |||
1140 | #ifdef CONFIG_IP_NF_CT_ACCT | 1140 | #ifdef CONFIG_IP_NF_CT_ACCT |
1141 | if (do_acct) { | 1141 | if (do_acct) { |
1142 | ct->counters[CTINFO2DIR(ctinfo)].packets++; | 1142 | ct->counters[CTINFO2DIR(ctinfo)].packets++; |
1143 | ct->counters[CTINFO2DIR(ctinfo)].bytes += | 1143 | ct->counters[CTINFO2DIR(ctinfo)].bytes += |
1144 | ntohs(skb->nh.iph->tot_len); | 1144 | ntohs(skb->nh.iph->tot_len); |
1145 | if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) | 1145 | if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) |
1146 | || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) | 1146 | || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) |
@@ -1194,7 +1194,7 @@ ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user) | |||
1194 | { | 1194 | { |
1195 | skb_orphan(skb); | 1195 | skb_orphan(skb); |
1196 | 1196 | ||
1197 | local_bh_disable(); | 1197 | local_bh_disable(); |
1198 | skb = ip_defrag(skb, user); | 1198 | skb = ip_defrag(skb, user); |
1199 | local_bh_enable(); | 1199 | local_bh_enable(); |
1200 | 1200 | ||
@@ -1211,7 +1211,7 @@ static void ip_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb) | |||
1211 | 1211 | ||
1212 | /* This ICMP is in reverse direction to the packet which caused it */ | 1212 | /* This ICMP is in reverse direction to the packet which caused it */ |
1213 | ct = ip_conntrack_get(skb, &ctinfo); | 1213 | ct = ip_conntrack_get(skb, &ctinfo); |
1214 | 1214 | ||
1215 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) | 1215 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) |
1216 | ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; | 1216 | ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; |
1217 | else | 1217 | else |
@@ -1279,7 +1279,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
1279 | struct inet_sock *inet = inet_sk(sk); | 1279 | struct inet_sock *inet = inet_sk(sk); |
1280 | struct ip_conntrack_tuple_hash *h; | 1280 | struct ip_conntrack_tuple_hash *h; |
1281 | struct ip_conntrack_tuple tuple; | 1281 | struct ip_conntrack_tuple tuple; |
1282 | 1282 | ||
1283 | IP_CT_TUPLE_U_BLANK(&tuple); | 1283 | IP_CT_TUPLE_U_BLANK(&tuple); |
1284 | tuple.src.ip = inet->rcv_saddr; | 1284 | tuple.src.ip = inet->rcv_saddr; |
1285 | tuple.src.u.tcp.port = inet->sport; | 1285 | tuple.src.u.tcp.port = inet->sport; |
@@ -1347,7 +1347,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size) | |||
1347 | if (vmalloced) | 1347 | if (vmalloced) |
1348 | vfree(hash); | 1348 | vfree(hash); |
1349 | else | 1349 | else |
1350 | free_pages((unsigned long)hash, | 1350 | free_pages((unsigned long)hash, |
1351 | get_order(sizeof(struct list_head) * size)); | 1351 | get_order(sizeof(struct list_head) * size)); |
1352 | } | 1352 | } |
1353 | 1353 | ||
@@ -1358,8 +1358,8 @@ void ip_conntrack_cleanup(void) | |||
1358 | ip_ct_attach = NULL; | 1358 | ip_ct_attach = NULL; |
1359 | 1359 | ||
1360 | /* This makes sure all current packets have passed through | 1360 | /* This makes sure all current packets have passed through |
1361 | netfilter framework. Roll on, two-stage module | 1361 | netfilter framework. Roll on, two-stage module |
1362 | delete... */ | 1362 | delete... */ |
1363 | synchronize_net(); | 1363 | synchronize_net(); |
1364 | 1364 | ||
1365 | ip_ct_event_cache_flush(); | 1365 | ip_ct_event_cache_flush(); |
@@ -1385,11 +1385,11 @@ static struct list_head *alloc_hashtable(int size, int *vmalloced) | |||
1385 | struct list_head *hash; | 1385 | struct list_head *hash; |
1386 | unsigned int i; | 1386 | unsigned int i; |
1387 | 1387 | ||
1388 | *vmalloced = 0; | 1388 | *vmalloced = 0; |
1389 | hash = (void*)__get_free_pages(GFP_KERNEL, | 1389 | hash = (void*)__get_free_pages(GFP_KERNEL, |
1390 | get_order(sizeof(struct list_head) | 1390 | get_order(sizeof(struct list_head) |
1391 | * size)); | 1391 | * size)); |
1392 | if (!hash) { | 1392 | if (!hash) { |
1393 | *vmalloced = 1; | 1393 | *vmalloced = 1; |
1394 | printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n"); | 1394 | printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n"); |
1395 | hash = vmalloc(sizeof(struct list_head) * size); | 1395 | hash = vmalloc(sizeof(struct list_head) * size); |
@@ -1422,7 +1422,7 @@ static int set_hashsize(const char *val, struct kernel_param *kp) | |||
1422 | if (!hash) | 1422 | if (!hash) |
1423 | return -ENOMEM; | 1423 | return -ENOMEM; |
1424 | 1424 | ||
1425 | /* We have to rehash for the new table anyway, so we also can | 1425 | /* We have to rehash for the new table anyway, so we also can |
1426 | * use a new random seed */ | 1426 | * use a new random seed */ |
1427 | get_random_bytes(&rnd, 4); | 1427 | get_random_bytes(&rnd, 4); |
1428 | 1428 | ||
@@ -1460,7 +1460,7 @@ int __init ip_conntrack_init(void) | |||
1460 | 1460 | ||
1461 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB | 1461 | /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB |
1462 | * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ | 1462 | * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ |
1463 | if (!ip_conntrack_htable_size) { | 1463 | if (!ip_conntrack_htable_size) { |
1464 | ip_conntrack_htable_size | 1464 | ip_conntrack_htable_size |
1465 | = (((num_physpages << PAGE_SHIFT) / 16384) | 1465 | = (((num_physpages << PAGE_SHIFT) / 16384) |
1466 | / sizeof(struct list_head)); | 1466 | / sizeof(struct list_head)); |
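
The init hunk above sizes the hash table as 1/16384 of physical memory divided by the size of a list head, which is where the "32MB machine has 256 buckets. >= 1GB machines have 8192 buckets" comment comes from: 32 MB / 16384 = 2048 bytes of budget, and 2048 / 8 (a two-pointer list_head on i386) = 256, while the >= 1 GB figure is where the clamp lands. A small arithmetic check of those two data points:

#include <stdio.h>

int main(void)
{
    unsigned long list_head_size = 8;                     /* two pointers on i386 */
    unsigned long mem[] = { 32UL << 20, 1UL << 30 };      /* 32 MB, 1 GB */

    for (int i = 0; i < 2; i++) {
        unsigned long buckets = (mem[i] / 16384) / list_head_size;
        if (buckets > 8192)
            buckets = 8192;                               /* upper clamp from the comment */
        printf("%lu MB -> %lu buckets\n", mem[i] >> 20, buckets);
    }
    return 0;       /* prints 32 MB -> 256 buckets, 1024 MB -> 8192 buckets */
}
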
@@ -1490,8 +1490,8 @@ int __init ip_conntrack_init(void) | |||
1490 | } | 1490 | } |
1491 | 1491 | ||
1492 | ip_conntrack_cachep = kmem_cache_create("ip_conntrack", | 1492 | ip_conntrack_cachep = kmem_cache_create("ip_conntrack", |
1493 | sizeof(struct ip_conntrack), 0, | 1493 | sizeof(struct ip_conntrack), 0, |
1494 | 0, NULL, NULL); | 1494 | 0, NULL, NULL); |
1495 | if (!ip_conntrack_cachep) { | 1495 | if (!ip_conntrack_cachep) { |
1496 | printk(KERN_ERR "Unable to create ip_conntrack slab cache\n"); | 1496 | printk(KERN_ERR "Unable to create ip_conntrack slab cache\n"); |
1497 | goto err_free_hash; | 1497 | goto err_free_hash; |
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c index 0410c99cacae..1faa68ab9432 100644 --- a/net/ipv4/netfilter/ip_conntrack_ftp.c +++ b/net/ipv4/netfilter/ip_conntrack_ftp.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* FTP extension for IP connection tracking. */ | 1 | /* FTP extension for IP connection tracking. */ |
2 | 2 | ||
3 | /* (C) 1999-2001 Paul `Rusty' Russell | 3 | /* (C) 1999-2001 Paul `Rusty' Russell |
4 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> | 4 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
@@ -169,7 +169,7 @@ static int try_eprt(const char *data, size_t dlen, u_int32_t array[6], | |||
169 | int length; | 169 | int length; |
170 | 170 | ||
171 | /* First character is delimiter, then "1" for IPv4, then | 171 | /* First character is delimiter, then "1" for IPv4, then |
172 | delimiter again. */ | 172 | delimiter again. */ |
173 | if (dlen <= 3) return 0; | 173 | if (dlen <= 3) return 0; |
174 | delim = data[0]; | 174 | delim = data[0]; |
175 | if (isdigit(delim) || delim < 33 || delim > 126 | 175 | if (isdigit(delim) || delim < 33 || delim > 126 |
@@ -344,14 +344,14 @@ static int help(struct sk_buff **pskb, | |||
344 | if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) { | 344 | if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) { |
345 | /* Now if this ends in \n, update ftp info. */ | 345 | /* Now if this ends in \n, update ftp info. */ |
346 | DEBUGP("ip_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n", | 346 | DEBUGP("ip_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n", |
347 | ct_ftp_info->seq_aft_nl[0][dir] | 347 | ct_ftp_info->seq_aft_nl[0][dir] |
348 | old_seq_aft_nl_set ? "":"(UNSET) ", old_seq_aft_nl); | 348 | old_seq_aft_nl_set ? "":"(UNSET) ", old_seq_aft_nl); |
349 | ret = NF_ACCEPT; | 349 | ret = NF_ACCEPT; |
350 | goto out_update_nl; | 350 | goto out_update_nl; |
351 | } | 351 | } |
352 | 352 | ||
353 | /* Initialize IP array to expected address (it's not mentioned | 353 | /* Initialize IP array to expected address (it's not mentioned |
354 | in EPSV responses) */ | 354 | in EPSV responses) */ |
355 | array[0] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 24) & 0xFF; | 355 | array[0] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 24) & 0xFF; |
356 | array[1] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 16) & 0xFF; | 356 | array[1] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 16) & 0xFF; |
357 | array[2] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 8) & 0xFF; | 357 | array[2] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 8) & 0xFF; |
@@ -386,7 +386,7 @@ static int help(struct sk_buff **pskb, | |||
386 | 386 | ||
387 | DEBUGP("conntrack_ftp: match `%s' (%u bytes at %u)\n", | 387 | DEBUGP("conntrack_ftp: match `%s' (%u bytes at %u)\n", |
388 | fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff); | 388 | fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff); |
389 | 389 | ||
390 | /* Allocate expectation which will be inserted */ | 390 | /* Allocate expectation which will be inserted */ |
391 | exp = ip_conntrack_expect_alloc(ct); | 391 | exp = ip_conntrack_expect_alloc(ct); |
392 | if (exp == NULL) { | 392 | if (exp == NULL) { |
@@ -504,7 +504,7 @@ static int __init ip_conntrack_ftp_init(void) | |||
504 | sprintf(tmpname, "ftp-%d", ports[i]); | 504 | sprintf(tmpname, "ftp-%d", ports[i]); |
505 | ftp[i].name = tmpname; | 505 | ftp[i].name = tmpname; |
506 | 506 | ||
507 | DEBUGP("ip_ct_ftp: registering helper for port %d\n", | 507 | DEBUGP("ip_ct_ftp: registering helper for port %d\n", |
508 | ports[i]); | 508 | ports[i]); |
509 | ret = ip_conntrack_helper_register(&ftp[i]); | 509 | ret = ip_conntrack_helper_register(&ftp[i]); |
510 | 510 | ||
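
The try_eprt() hunk earlier in this file validates the shape of an RFC 2428 EPRT argument, "|1|<address>|<port>|": at least four bytes, a printable non-digit delimiter, then "1" for IPv4 between the first two delimiters. A small standalone version of just that prefix check — the helper name and return convention are mine, and the continuation of the kernel's condition is not shown in the hunk:

#include <ctype.h>
#include <stddef.h>

/* Return 1 if 'data' starts like an IPv4 EPRT argument "|1|...". */
static int eprt_prefix_ok(const char *data, size_t dlen)
{
    char delim;

    if (dlen <= 3)
        return 0;
    delim = data[0];
    if (isdigit((unsigned char)delim) || delim < 33 || delim > 126)
        return 0;                               /* delimiter must be printable, non-digit */
    return data[1] == '1' && data[2] == delim;  /* "1" = IPv4, then delimiter again */
}
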
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_h323.c b/net/ipv4/netfilter/ip_conntrack_helper_h323.c index aabfe1c06905..53eb365ccc7e 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_h323.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_h323.c | |||
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(gkrouted_only, "only accept calls from gatekeeper"); | |||
42 | static int callforward_filter = 1; | 42 | static int callforward_filter = 1; |
43 | module_param(callforward_filter, bool, 0600); | 43 | module_param(callforward_filter, bool, 0600); |
44 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " | 44 | MODULE_PARM_DESC(callforward_filter, "only create call forwarding expectations " |
45 | "if both endpoints are on different sides " | 45 | "if both endpoints are on different sides " |
46 | "(determined by routing information)"); | 46 | "(determined by routing information)"); |
47 | 47 | ||
48 | /* Hooks for NAT */ | 48 | /* Hooks for NAT */ |
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c index 4d19373bbf0d..2b760c5cf709 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c | |||
@@ -560,7 +560,7 @@ conntrack_pptp_help(struct sk_buff **pskb, | |||
560 | tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); | 560 | tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph); |
561 | BUG_ON(!tcph); | 561 | BUG_ON(!tcph); |
562 | nexthdr_off += tcph->doff * 4; | 562 | nexthdr_off += tcph->doff * 4; |
563 | datalen = tcplen - tcph->doff * 4; | 563 | datalen = tcplen - tcph->doff * 4; |
564 | 564 | ||
565 | pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); | 565 | pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph); |
566 | if (!pptph) { | 566 | if (!pptph) { |
@@ -624,7 +624,7 @@ static struct ip_conntrack_helper pptp = { | |||
624 | .max_expected = 2, | 624 | .max_expected = 2, |
625 | .timeout = 5 * 60, | 625 | .timeout = 5 * 60, |
626 | .tuple = { .src = { .ip = 0, | 626 | .tuple = { .src = { .ip = 0, |
627 | .u = { .tcp = { .port = | 627 | .u = { .tcp = { .port = |
628 | __constant_htons(PPTP_CONTROL_PORT) } } | 628 | __constant_htons(PPTP_CONTROL_PORT) } } |
629 | }, | 629 | }, |
630 | .dst = { .ip = 0, | 630 | .dst = { .ip = 0, |
@@ -638,7 +638,7 @@ static struct ip_conntrack_helper pptp = { | |||
638 | .dst = { .ip = 0, | 638 | .dst = { .ip = 0, |
639 | .u = { .all = 0 }, | 639 | .u = { .all = 0 }, |
640 | .protonum = 0xff | 640 | .protonum = 0xff |
641 | } | 641 | } |
642 | }, | 642 | }, |
643 | .help = conntrack_pptp_help, | 643 | .help = conntrack_pptp_help, |
644 | .destroy = pptp_destroy_siblings, | 644 | .destroy = pptp_destroy_siblings, |
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c index 91832eca4106..053e591f407a 100644 --- a/net/ipv4/netfilter/ip_conntrack_irc.c +++ b/net/ipv4/netfilter/ip_conntrack_irc.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* IRC extension for IP connection tracking, Version 1.21 | 1 | /* IRC extension for IP connection tracking, Version 1.21 |
2 | * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> | 2 | * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org> |
3 | * based on RR's ip_conntrack_ftp.c | 3 | * based on RR's ip_conntrack_ftp.c |
4 | * | 4 | * |
5 | * ip_conntrack_irc.c,v 1.21 2002/02/05 14:49:26 laforge Exp | 5 | * ip_conntrack_irc.c,v 1.21 2002/02/05 14:49:26 laforge Exp |
6 | * | 6 | * |
@@ -12,12 +12,12 @@ | |||
12 | * Module load syntax: | 12 | * Module load syntax: |
13 | * insmod ip_conntrack_irc.o ports=port1,port2,...port<MAX_PORTS> | 13 | * insmod ip_conntrack_irc.o ports=port1,port2,...port<MAX_PORTS> |
14 | * max_dcc_channels=n dcc_timeout=secs | 14 | * max_dcc_channels=n dcc_timeout=secs |
15 | * | 15 | * |
16 | * please give the ports of all IRC servers You wish to connect to. | 16 | * please give the ports of all IRC servers You wish to connect to. |
17 | * If You don't specify ports, the default will be port 6667. | 17 | * If You don't specify ports, the default will be port 6667. |
18 | * With max_dcc_channels you can define the maximum number of not | 18 | * With max_dcc_channels you can define the maximum number of not |
19 | * yet answered DCC channels per IRC session (default 8). | 19 | * yet answered DCC channels per IRC session (default 8). |
20 | * With dcc_timeout you can specify how long the system waits for | 20 | * With dcc_timeout you can specify how long the system waits for |
21 | * an expected DCC channel (default 300 seconds). | 21 | * an expected DCC channel (default 300 seconds). |
22 | * | 22 | * |
23 | */ | 23 | */ |
@@ -63,7 +63,7 @@ static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " | |||
63 | 63 | ||
64 | #if 0 | 64 | #if 0 |
65 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \ | 65 | #define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s:" format, \ |
66 | __FILE__, __FUNCTION__ , ## args) | 66 | __FILE__, __FUNCTION__ , ## args) |
67 | #else | 67 | #else |
68 | #define DEBUGP(format, args...) | 68 | #define DEBUGP(format, args...) |
69 | #endif | 69 | #endif |
@@ -71,7 +71,7 @@ static const char *dccprotos[] = { "SEND ", "CHAT ", "MOVE ", "TSEND ", "SCHAT " | |||
71 | static int parse_dcc(char *data, char *data_end, u_int32_t *ip, | 71 | static int parse_dcc(char *data, char *data_end, u_int32_t *ip, |
72 | u_int16_t *port, char **ad_beg_p, char **ad_end_p) | 72 | u_int16_t *port, char **ad_beg_p, char **ad_end_p) |
73 | /* tries to get the ip_addr and port out of a dcc command | 73 | /* tries to get the ip_addr and port out of a dcc command |
74 | return value: -1 on failure, 0 on success | 74 | return value: -1 on failure, 0 on success |
75 | data pointer to first byte of DCC command data | 75 | data pointer to first byte of DCC command data |
76 | data_end pointer to last byte of dcc command data | 76 | data_end pointer to last byte of dcc command data |
77 | ip returns parsed ip of dcc command | 77 | ip returns parsed ip of dcc command |
@@ -90,7 +90,7 @@ static int parse_dcc(char *data, char *data_end, u_int32_t *ip, | |||
90 | 90 | ||
91 | /* skip blanks between ip and port */ | 91 | /* skip blanks between ip and port */ |
92 | while (*data == ' ') { | 92 | while (*data == ' ') { |
93 | if (data >= data_end) | 93 | if (data >= data_end) |
94 | return -1; | 94 | return -1; |
95 | data++; | 95 | data++; |
96 | } | 96 | } |
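
parse_dcc(), documented and partly shown above, pulls an address and port out of a DCC argument, where IRC transmits the IPv4 address as a single 32-bit decimal number followed by a decimal port. A simplified sketch of that parsing, operating on a NUL-terminated copy for brevity where the kernel instead carries an explicit data_end bound through every step; the helper name and 0/-1 convention mirror the comment above but the code is mine:

#include <stdint.h>
#include <stdlib.h>

static int parse_ip_port(const char *data, uint32_t *ip, uint16_t *port)
{
    char *end;
    unsigned long v;

    v = strtoul(data, &end, 10);          /* address as one decimal number */
    if (end == data)
        return -1;
    *ip = (uint32_t)v;

    while (*end == ' ')                   /* skip blanks between ip and port */
        end++;

    v = strtoul(end, NULL, 10);           /* decimal port */
    if (v == 0 || v > 65535)
        return -1;
    *port = (uint16_t)v;
    return 0;
}
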
@@ -171,7 +171,7 @@ static int help(struct sk_buff **pskb, | |||
171 | 171 | ||
172 | DEBUGP("DCC %s detected\n", dccprotos[i]); | 172 | DEBUGP("DCC %s detected\n", dccprotos[i]); |
173 | data += strlen(dccprotos[i]); | 173 | data += strlen(dccprotos[i]); |
174 | /* we have at least | 174 | /* we have at least |
175 | * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid | 175 | * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid |
176 | * data left (== 14/13 bytes) */ | 176 | * data left (== 14/13 bytes) */ |
177 | if (parse_dcc((char *)data, data_limit, &dcc_ip, | 177 | if (parse_dcc((char *)data, data_limit, &dcc_ip, |
@@ -260,7 +260,7 @@ static int __init ip_conntrack_irc_init(void) | |||
260 | irc_buffer = kmalloc(65536, GFP_KERNEL); | 260 | irc_buffer = kmalloc(65536, GFP_KERNEL); |
261 | if (!irc_buffer) | 261 | if (!irc_buffer) |
262 | return -ENOMEM; | 262 | return -ENOMEM; |
263 | 263 | ||
264 | /* If no port given, default to standard irc port */ | 264 | /* If no port given, default to standard irc port */ |
265 | if (ports_c == 0) | 265 | if (ports_c == 0) |
266 | ports[ports_c++] = IRC_PORT; | 266 | ports[ports_c++] = IRC_PORT; |
@@ -297,7 +297,7 @@ static int __init ip_conntrack_irc_init(void) | |||
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | 299 | ||
300 | /* This function is intentionally _NOT_ defined as __exit, because | 300 | /* This function is intentionally _NOT_ defined as __exit, because |
301 | * it is needed by the init function */ | 301 | * it is needed by the init function */ |
302 | static void ip_conntrack_irc_fini(void) | 302 | static void ip_conntrack_irc_fini(void) |
303 | { | 303 | { |
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c index a1d6a89f64aa..cc6dd49c9da0 100644 --- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c +++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c | |||
@@ -42,7 +42,7 @@ module_param(timeout, uint, 0400); | |||
42 | MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); | 42 | MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds"); |
43 | 43 | ||
44 | static int help(struct sk_buff **pskb, | 44 | static int help(struct sk_buff **pskb, |
45 | struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) | 45 | struct ip_conntrack *ct, enum ip_conntrack_info ctinfo) |
46 | { | 46 | { |
47 | struct ip_conntrack_expect *exp; | 47 | struct ip_conntrack_expect *exp; |
48 | struct iphdr *iph = (*pskb)->nh.iph; | 48 | struct iphdr *iph = (*pskb)->nh.iph; |
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index 7f70b0886b83..9228b76ccd9a 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c | |||
@@ -6,10 +6,10 @@ | |||
6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> | 6 | * (C) 2003 by Patrick Mchardy <kaber@trash.net> |
7 | * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net> | 7 | * (C) 2005-2006 by Pablo Neira Ayuso <pablo@eurodev.net> |
8 | * | 8 | * |
9 | * I've reworked this stuff to use attributes instead of conntrack | 9 | * I've reworked this stuff to use attributes instead of conntrack |
10 | * structures. 5.44 am. I need more tea. --pablo 05/07/11. | 10 | * structures. 5.44 am. I need more tea. --pablo 05/07/11. |
11 | * | 11 | * |
12 | * Initial connection tracking via netlink development funded and | 12 | * Initial connection tracking via netlink development funded and |
13 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) | 13 | * generally made possible by Network Robots, Inc. (www.networkrobots.com) |
14 | * | 14 | * |
15 | * Further development of this code funded by Astaro AG (http://www.astaro.com) | 15 | * Further development of this code funded by Astaro AG (http://www.astaro.com) |
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL"); | |||
45 | static char __initdata version[] = "0.90"; | 45 | static char __initdata version[] = "0.90"; |
46 | 46 | ||
47 | static inline int | 47 | static inline int |
48 | ctnetlink_dump_tuples_proto(struct sk_buff *skb, | 48 | ctnetlink_dump_tuples_proto(struct sk_buff *skb, |
49 | const struct ip_conntrack_tuple *tuple, | 49 | const struct ip_conntrack_tuple *tuple, |
50 | struct ip_conntrack_protocol *proto) | 50 | struct ip_conntrack_protocol *proto) |
51 | { | 51 | { |
@@ -56,7 +56,7 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb, | |||
56 | 56 | ||
57 | if (likely(proto->tuple_to_nfattr)) | 57 | if (likely(proto->tuple_to_nfattr)) |
58 | ret = proto->tuple_to_nfattr(skb, tuple); | 58 | ret = proto->tuple_to_nfattr(skb, tuple); |
59 | 59 | ||
60 | NFA_NEST_END(skb, nest_parms); | 60 | NFA_NEST_END(skb, nest_parms); |
61 | 61 | ||
62 | return ret; | 62 | return ret; |
@@ -70,7 +70,7 @@ ctnetlink_dump_tuples_ip(struct sk_buff *skb, | |||
70 | const struct ip_conntrack_tuple *tuple) | 70 | const struct ip_conntrack_tuple *tuple) |
71 | { | 71 | { |
72 | struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP); | 72 | struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP); |
73 | 73 | ||
74 | NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(__be32), &tuple->src.ip); | 74 | NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(__be32), &tuple->src.ip); |
75 | NFA_PUT(skb, CTA_IP_V4_DST, sizeof(__be32), &tuple->dst.ip); | 75 | NFA_PUT(skb, CTA_IP_V4_DST, sizeof(__be32), &tuple->dst.ip); |
76 | 76 | ||
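
The ctnetlink dump helpers above wrap tuple data in nested netlink attributes: NFA_NEST() drops a placeholder attribute header, NFA_PUT() appends the leaf attributes, and NFA_NEST_END() goes back and writes the final length. The same "emit header now, backfill length later" pattern in plain C — real nfnetlink additionally pads every attribute to a 4-byte boundary, uses its own header layout, and checks remaining buffer space, all of which this sketch omits:

#include <stdint.h>
#include <string.h>

struct buf { uint8_t *data; size_t len; };    /* capacity checks omitted */

/* Append one type-length-value attribute; returns the offset of its header. */
static size_t tlv_put(struct buf *b, uint16_t type,
                      const void *payload, uint16_t plen)
{
    size_t off = b->len;
    uint16_t hdr[2] = { type, (uint16_t)(4 + plen) };

    memcpy(b->data + b->len, hdr, 4);
    if (plen)
        memcpy(b->data + b->len + 4, payload, plen);
    b->len += 4 + (size_t)plen;
    return off;
}

/* Start a nested attribute: emit its header now, fix the length later. */
static size_t tlv_nest_begin(struct buf *b, uint16_t type)
{
    return tlv_put(b, type, NULL, 0);
}

static void tlv_nest_end(struct buf *b, size_t nest_off)
{
    uint16_t total = (uint16_t)(b->len - nest_off);
    memcpy(b->data + nest_off + 2, &total, 2);    /* backfill the nested length */
}
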
@@ -121,7 +121,7 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct ip_conntrack *ct) | |||
121 | timeout = 0; | 121 | timeout = 0; |
122 | else | 122 | else |
123 | timeout = htonl(timeout_l / HZ); | 123 | timeout = htonl(timeout_l / HZ); |
124 | 124 | ||
125 | NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); | 125 | NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); |
126 | return 0; | 126 | return 0; |
127 | 127 | ||
@@ -141,7 +141,7 @@ ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct ip_conntrack *ct) | |||
141 | ip_conntrack_proto_put(proto); | 141 | ip_conntrack_proto_put(proto); |
142 | return 0; | 142 | return 0; |
143 | } | 143 | } |
144 | 144 | ||
145 | nest_proto = NFA_NEST(skb, CTA_PROTOINFO); | 145 | nest_proto = NFA_NEST(skb, CTA_PROTOINFO); |
146 | 146 | ||
147 | ret = proto->to_nfattr(skb, nest_proto, ct); | 147 | ret = proto->to_nfattr(skb, nest_proto, ct); |
@@ -164,7 +164,7 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct ip_conntrack *ct) | |||
164 | 164 | ||
165 | if (!ct->helper) | 165 | if (!ct->helper) |
166 | return 0; | 166 | return 0; |
167 | 167 | ||
168 | nest_helper = NFA_NEST(skb, CTA_HELP); | 168 | nest_helper = NFA_NEST(skb, CTA_HELP); |
169 | NFA_PUT(skb, CTA_HELP_NAME, strlen(ct->helper->name), ct->helper->name); | 169 | NFA_PUT(skb, CTA_HELP_NAME, strlen(ct->helper->name), ct->helper->name); |
170 | 170 | ||
@@ -236,7 +236,7 @@ static inline int | |||
236 | ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct) | 236 | ctnetlink_dump_use(struct sk_buff *skb, const struct ip_conntrack *ct) |
237 | { | 237 | { |
238 | __be32 use = htonl(atomic_read(&ct->ct_general.use)); | 238 | __be32 use = htonl(atomic_read(&ct->ct_general.use)); |
239 | 239 | ||
240 | NFA_PUT(skb, CTA_USE, sizeof(__be32), &use); | 240 | NFA_PUT(skb, CTA_USE, sizeof(__be32), &use); |
241 | return 0; | 241 | return 0; |
242 | 242 | ||
@@ -248,7 +248,7 @@ nfattr_failure: | |||
248 | 248 | ||
249 | static int | 249 | static int |
250 | ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | 250 | ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, |
251 | int event, int nowait, | 251 | int event, int nowait, |
252 | const struct ip_conntrack *ct) | 252 | const struct ip_conntrack *ct) |
253 | { | 253 | { |
254 | struct nlmsghdr *nlh; | 254 | struct nlmsghdr *nlh; |
@@ -271,7 +271,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
271 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) | 271 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) |
272 | goto nfattr_failure; | 272 | goto nfattr_failure; |
273 | NFA_NEST_END(skb, nest_parms); | 273 | NFA_NEST_END(skb, nest_parms); |
274 | 274 | ||
275 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); | 275 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); |
276 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) | 276 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) |
277 | goto nfattr_failure; | 277 | goto nfattr_failure; |
@@ -299,7 +299,7 @@ nfattr_failure: | |||
299 | 299 | ||
300 | #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS | 300 | #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS |
301 | static int ctnetlink_conntrack_event(struct notifier_block *this, | 301 | static int ctnetlink_conntrack_event(struct notifier_block *this, |
302 | unsigned long events, void *ptr) | 302 | unsigned long events, void *ptr) |
303 | { | 303 | { |
304 | struct nlmsghdr *nlh; | 304 | struct nlmsghdr *nlh; |
305 | struct nfgenmsg *nfmsg; | 305 | struct nfgenmsg *nfmsg; |
@@ -324,7 +324,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
324 | } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { | 324 | } else if (events & (IPCT_STATUS | IPCT_PROTOINFO)) { |
325 | type = IPCTNL_MSG_CT_NEW; | 325 | type = IPCTNL_MSG_CT_NEW; |
326 | group = NFNLGRP_CONNTRACK_UPDATE; | 326 | group = NFNLGRP_CONNTRACK_UPDATE; |
327 | } else | 327 | } else |
328 | return NOTIFY_DONE; | 328 | return NOTIFY_DONE; |
329 | 329 | ||
330 | if (!nfnetlink_has_listeners(group)) | 330 | if (!nfnetlink_has_listeners(group)) |
@@ -349,7 +349,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
349 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) | 349 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) |
350 | goto nfattr_failure; | 350 | goto nfattr_failure; |
351 | NFA_NEST_END(skb, nest_parms); | 351 | NFA_NEST_END(skb, nest_parms); |
352 | 352 | ||
353 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); | 353 | nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); |
354 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) | 354 | if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) |
355 | goto nfattr_failure; | 355 | goto nfattr_failure; |
@@ -368,16 +368,16 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, | |||
368 | 368 | ||
369 | if (events & IPCT_PROTOINFO | 369 | if (events & IPCT_PROTOINFO |
370 | && ctnetlink_dump_protoinfo(skb, ct) < 0) | 370 | && ctnetlink_dump_protoinfo(skb, ct) < 0) |
371 | goto nfattr_failure; | 371 | goto nfattr_failure; |
372 | 372 | ||
373 | if ((events & IPCT_HELPER || ct->helper) | 373 | if ((events & IPCT_HELPER || ct->helper) |
374 | && ctnetlink_dump_helpinfo(skb, ct) < 0) | 374 | && ctnetlink_dump_helpinfo(skb, ct) < 0) |
375 | goto nfattr_failure; | 375 | goto nfattr_failure; |
376 | 376 | ||
377 | #ifdef CONFIG_IP_NF_CONNTRACK_MARK | 377 | #ifdef CONFIG_IP_NF_CONNTRACK_MARK |
378 | if ((events & IPCT_MARK || ct->mark) | 378 | if ((events & IPCT_MARK || ct->mark) |
379 | && ctnetlink_dump_mark(skb, ct) < 0) | 379 | && ctnetlink_dump_mark(skb, ct) < 0) |
380 | goto nfattr_failure; | 380 | goto nfattr_failure; |
381 | #endif | 381 | #endif |
382 | 382 | ||
383 | if (events & IPCT_COUNTER_FILLING && | 383 | if (events & IPCT_COUNTER_FILLING && |
@@ -426,7 +426,7 @@ restart: | |||
426 | cb->args[1] = 0; | 426 | cb->args[1] = 0; |
427 | } | 427 | } |
428 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, | 428 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, |
429 | cb->nlh->nlmsg_seq, | 429 | cb->nlh->nlmsg_seq, |
430 | IPCTNL_MSG_CT_NEW, | 430 | IPCTNL_MSG_CT_NEW, |
431 | 1, ct) < 0) { | 431 | 1, ct) < 0) { |
432 | nf_conntrack_get(&ct->ct_general); | 432 | nf_conntrack_get(&ct->ct_general); |
@@ -488,7 +488,7 @@ static const size_t cta_min_proto[CTA_PROTO_MAX] = { | |||
488 | }; | 488 | }; |
489 | 489 | ||
490 | static inline int | 490 | static inline int |
491 | ctnetlink_parse_tuple_proto(struct nfattr *attr, | 491 | ctnetlink_parse_tuple_proto(struct nfattr *attr, |
492 | struct ip_conntrack_tuple *tuple) | 492 | struct ip_conntrack_tuple *tuple) |
493 | { | 493 | { |
494 | struct nfattr *tb[CTA_PROTO_MAX]; | 494 | struct nfattr *tb[CTA_PROTO_MAX]; |
@@ -508,9 +508,9 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr, | |||
508 | 508 | ||
509 | if (likely(proto->nfattr_to_tuple)) | 509 | if (likely(proto->nfattr_to_tuple)) |
510 | ret = proto->nfattr_to_tuple(tb, tuple); | 510 | ret = proto->nfattr_to_tuple(tb, tuple); |
511 | 511 | ||
512 | ip_conntrack_proto_put(proto); | 512 | ip_conntrack_proto_put(proto); |
513 | 513 | ||
514 | return ret; | 514 | return ret; |
515 | } | 515 | } |
516 | 516 | ||
@@ -595,7 +595,7 @@ ctnetlink_parse_nat(struct nfattr *nat, | |||
595 | int err; | 595 | int err; |
596 | 596 | ||
597 | memset(range, 0, sizeof(*range)); | 597 | memset(range, 0, sizeof(*range)); |
598 | 598 | ||
599 | nfattr_parse_nested(tb, CTA_NAT_MAX, nat); | 599 | nfattr_parse_nested(tb, CTA_NAT_MAX, nat); |
600 | 600 | ||
601 | if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat)) | 601 | if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat)) |
@@ -647,7 +647,7 @@ static const size_t cta_min[CTA_MAX] = { | |||
647 | }; | 647 | }; |
648 | 648 | ||
649 | static int | 649 | static int |
650 | ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | 650 | ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, |
651 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 651 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
652 | { | 652 | { |
653 | struct ip_conntrack_tuple_hash *h; | 653 | struct ip_conntrack_tuple_hash *h; |
@@ -676,14 +676,14 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
676 | return -ENOENT; | 676 | return -ENOENT; |
677 | 677 | ||
678 | ct = tuplehash_to_ctrack(h); | 678 | ct = tuplehash_to_ctrack(h); |
679 | 679 | ||
680 | if (cda[CTA_ID-1]) { | 680 | if (cda[CTA_ID-1]) { |
681 | u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); | 681 | u_int32_t id = ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); |
682 | if (ct->id != id) { | 682 | if (ct->id != id) { |
683 | ip_conntrack_put(ct); | 683 | ip_conntrack_put(ct); |
684 | return -ENOENT; | 684 | return -ENOENT; |
685 | } | 685 | } |
686 | } | 686 | } |
687 | if (del_timer(&ct->timeout)) | 687 | if (del_timer(&ct->timeout)) |
688 | ct->timeout.function((unsigned long)ct); | 688 | ct->timeout.function((unsigned long)ct); |
689 | 689 | ||
@@ -693,7 +693,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
693 | } | 693 | } |
694 | 694 | ||
695 | static int | 695 | static int |
696 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | 696 | ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, |
697 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 697 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
698 | { | 698 | { |
699 | struct ip_conntrack_tuple_hash *h; | 699 | struct ip_conntrack_tuple_hash *h; |
@@ -714,8 +714,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
714 | return -ENOTSUPP; | 714 | return -ENOTSUPP; |
715 | #endif | 715 | #endif |
716 | if ((*errp = netlink_dump_start(ctnl, skb, nlh, | 716 | if ((*errp = netlink_dump_start(ctnl, skb, nlh, |
717 | ctnetlink_dump_table, | 717 | ctnetlink_dump_table, |
718 | ctnetlink_done)) != 0) | 718 | ctnetlink_done)) != 0) |
719 | return -EINVAL; | 719 | return -EINVAL; |
720 | 720 | ||
721 | rlen = NLMSG_ALIGN(nlh->nlmsg_len); | 721 | rlen = NLMSG_ALIGN(nlh->nlmsg_len); |
@@ -751,7 +751,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
751 | return -ENOMEM; | 751 | return -ENOMEM; |
752 | } | 752 | } |
753 | 753 | ||
754 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, | 754 | err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, |
755 | IPCTNL_MSG_CT_NEW, 1, ct); | 755 | IPCTNL_MSG_CT_NEW, 1, ct); |
756 | ip_conntrack_put(ct); | 756 | ip_conntrack_put(ct); |
757 | if (err <= 0) | 757 | if (err <= 0) |
@@ -779,12 +779,12 @@ ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[]) | |||
779 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) | 779 | if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) |
780 | /* unchangeable */ | 780 | /* unchangeable */ |
781 | return -EINVAL; | 781 | return -EINVAL; |
782 | 782 | ||
783 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) | 783 | if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) |
784 | /* SEEN_REPLY bit can only be set */ | 784 | /* SEEN_REPLY bit can only be set */ |
785 | return -EINVAL; | 785 | return -EINVAL; |
786 | 786 | ||
787 | 787 | ||
788 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) | 788 | if (d & IPS_ASSURED && !(status & IPS_ASSURED)) |
789 | /* ASSURED bit can only be set */ | 789 | /* ASSURED bit can only be set */ |
790 | return -EINVAL; | 790 | return -EINVAL; |
@@ -857,7 +857,7 @@ ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[]) | |||
857 | memset(&ct->help, 0, sizeof(ct->help)); | 857 | memset(&ct->help, 0, sizeof(ct->help)); |
858 | } | 858 | } |
859 | } | 859 | } |
860 | 860 | ||
861 | ct->helper = helper; | 861 | ct->helper = helper; |
862 | 862 | ||
863 | return 0; | 863 | return 0; |
@@ -867,7 +867,7 @@ static inline int | |||
867 | ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[]) | 867 | ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[]) |
868 | { | 868 | { |
869 | u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); | 869 | u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); |
870 | 870 | ||
871 | if (!del_timer(&ct->timeout)) | 871 | if (!del_timer(&ct->timeout)) |
872 | return -ETIME; | 872 | return -ETIME; |
873 | 873 | ||
@@ -891,7 +891,7 @@ ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[]) | |||
891 | 891 | ||
892 | if (proto->from_nfattr) | 892 | if (proto->from_nfattr) |
893 | err = proto->from_nfattr(tb, ct); | 893 | err = proto->from_nfattr(tb, ct); |
894 | ip_conntrack_proto_put(proto); | 894 | ip_conntrack_proto_put(proto); |
895 | 895 | ||
896 | return err; | 896 | return err; |
897 | } | 897 | } |
@@ -934,7 +934,7 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[]) | |||
934 | } | 934 | } |
935 | 935 | ||
936 | static int | 936 | static int |
937 | ctnetlink_create_conntrack(struct nfattr *cda[], | 937 | ctnetlink_create_conntrack(struct nfattr *cda[], |
938 | struct ip_conntrack_tuple *otuple, | 938 | struct ip_conntrack_tuple *otuple, |
939 | struct ip_conntrack_tuple *rtuple) | 939 | struct ip_conntrack_tuple *rtuple) |
940 | { | 940 | { |
@@ -943,7 +943,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
943 | 943 | ||
944 | ct = ip_conntrack_alloc(otuple, rtuple); | 944 | ct = ip_conntrack_alloc(otuple, rtuple); |
945 | if (ct == NULL || IS_ERR(ct)) | 945 | if (ct == NULL || IS_ERR(ct)) |
946 | return -ENOMEM; | 946 | return -ENOMEM; |
947 | 947 | ||
948 | if (!cda[CTA_TIMEOUT-1]) | 948 | if (!cda[CTA_TIMEOUT-1]) |
949 | goto err; | 949 | goto err; |
@@ -979,13 +979,13 @@ ctnetlink_create_conntrack(struct nfattr *cda[], | |||
979 | 979 | ||
980 | return 0; | 980 | return 0; |
981 | 981 | ||
982 | err: | 982 | err: |
983 | ip_conntrack_free(ct); | 983 | ip_conntrack_free(ct); |
984 | return err; | 984 | return err; |
985 | } | 985 | } |
986 | 986 | ||
987 | static int | 987 | static int |
988 | ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | 988 | ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, |
989 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 989 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
990 | { | 990 | { |
991 | struct ip_conntrack_tuple otuple, rtuple; | 991 | struct ip_conntrack_tuple otuple, rtuple; |
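ctnetlink_create_conntrack above funnels every failure through a single err: label that frees the half-built conntrack, a common kernel cleanup pattern. A tiny userspace sketch of the same goto-based unwinding (the struct and helpers are invented for the example):

#include <stdlib.h>
#include <string.h>

struct tracker { char *buf; };            /* stand-in for the conntrack object */

/* Allocate and initialise a tracker; on any failure, unwind through
 * one cleanup label instead of duplicating free() calls. */
static struct tracker *tracker_create(const char *name, int have_mandatory_attr)
{
    struct tracker *t = malloc(sizeof(*t));
    if (t == NULL)
        return NULL;
    t->buf = NULL;

    if (!have_mandatory_attr)             /* mandatory attribute missing */
        goto err;

    t->buf = strdup(name);
    if (t->buf == NULL)
        goto err;

    return t;

err:
    free(t->buf);                         /* free(NULL) is a no-op */
    free(t);
    return NULL;
}

int main(void)
{
    struct tracker *t = tracker_create("demo", 1);
    if (t) {
        free(t->buf);
        free(t);
    }
    return 0;
}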
@@ -1039,9 +1039,9 @@ out_unlock: | |||
1039 | return err; | 1039 | return err; |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | /*********************************************************************** | 1042 | /*********************************************************************** |
1043 | * EXPECT | 1043 | * EXPECT |
1044 | ***********************************************************************/ | 1044 | ***********************************************************************/ |
1045 | 1045 | ||
1046 | static inline int | 1046 | static inline int |
1047 | ctnetlink_exp_dump_tuple(struct sk_buff *skb, | 1047 | ctnetlink_exp_dump_tuple(struct sk_buff *skb, |
@@ -1049,7 +1049,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb, | |||
1049 | enum ctattr_expect type) | 1049 | enum ctattr_expect type) |
1050 | { | 1050 | { |
1051 | struct nfattr *nest_parms = NFA_NEST(skb, type); | 1051 | struct nfattr *nest_parms = NFA_NEST(skb, type); |
1052 | 1052 | ||
1053 | if (ctnetlink_dump_tuples(skb, tuple) < 0) | 1053 | if (ctnetlink_dump_tuples(skb, tuple) < 0) |
1054 | goto nfattr_failure; | 1054 | goto nfattr_failure; |
1055 | 1055 | ||
@@ -1059,7 +1059,7 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb, | |||
1059 | 1059 | ||
1060 | nfattr_failure: | 1060 | nfattr_failure: |
1061 | return -1; | 1061 | return -1; |
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | static inline int | 1064 | static inline int |
1065 | ctnetlink_exp_dump_mask(struct sk_buff *skb, | 1065 | ctnetlink_exp_dump_mask(struct sk_buff *skb, |
@@ -1090,7 +1090,7 @@ nfattr_failure: | |||
1090 | 1090 | ||
1091 | static inline int | 1091 | static inline int |
1092 | ctnetlink_exp_dump_expect(struct sk_buff *skb, | 1092 | ctnetlink_exp_dump_expect(struct sk_buff *skb, |
1093 | const struct ip_conntrack_expect *exp) | 1093 | const struct ip_conntrack_expect *exp) |
1094 | { | 1094 | { |
1095 | struct ip_conntrack *master = exp->master; | 1095 | struct ip_conntrack *master = exp->master; |
1096 | __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); | 1096 | __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); |
@@ -1104,20 +1104,20 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, | |||
1104 | &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 1104 | &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
1105 | CTA_EXPECT_MASTER) < 0) | 1105 | CTA_EXPECT_MASTER) < 0) |
1106 | goto nfattr_failure; | 1106 | goto nfattr_failure; |
1107 | 1107 | ||
1108 | NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(__be32), &timeout); | 1108 | NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(__be32), &timeout); |
1109 | NFA_PUT(skb, CTA_EXPECT_ID, sizeof(__be32), &id); | 1109 | NFA_PUT(skb, CTA_EXPECT_ID, sizeof(__be32), &id); |
1110 | 1110 | ||
1111 | return 0; | 1111 | return 0; |
1112 | 1112 | ||
1113 | nfattr_failure: | 1113 | nfattr_failure: |
1114 | return -1; | 1114 | return -1; |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | static int | 1117 | static int |
1118 | ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | 1118 | ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, |
1119 | int event, | 1119 | int event, |
1120 | int nowait, | 1120 | int nowait, |
1121 | const struct ip_conntrack_expect *exp) | 1121 | const struct ip_conntrack_expect *exp) |
1122 | { | 1122 | { |
1123 | struct nlmsghdr *nlh; | 1123 | struct nlmsghdr *nlh; |
@@ -1216,7 +1216,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
1216 | goto out; | 1216 | goto out; |
1217 | *id = exp->id; | 1217 | *id = exp->id; |
1218 | } | 1218 | } |
1219 | out: | 1219 | out: |
1220 | read_unlock_bh(&ip_conntrack_lock); | 1220 | read_unlock_bh(&ip_conntrack_lock); |
1221 | 1221 | ||
1222 | return skb->len; | 1222 | return skb->len; |
@@ -1228,7 +1228,7 @@ static const size_t cta_min_exp[CTA_EXPECT_MAX] = { | |||
1228 | }; | 1228 | }; |
1229 | 1229 | ||
1230 | static int | 1230 | static int |
1231 | ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | 1231 | ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, |
1232 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 1232 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
1233 | { | 1233 | { |
1234 | struct ip_conntrack_tuple tuple; | 1234 | struct ip_conntrack_tuple tuple; |
@@ -1247,7 +1247,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1247 | return -EAFNOSUPPORT; | 1247 | return -EAFNOSUPPORT; |
1248 | 1248 | ||
1249 | if ((*errp = netlink_dump_start(ctnl, skb, nlh, | 1249 | if ((*errp = netlink_dump_start(ctnl, skb, nlh, |
1250 | ctnetlink_exp_dump_table, | 1250 | ctnetlink_exp_dump_table, |
1251 | ctnetlink_done)) != 0) | 1251 | ctnetlink_done)) != 0) |
1252 | return -EINVAL; | 1252 | return -EINVAL; |
1253 | rlen = NLMSG_ALIGN(nlh->nlmsg_len); | 1253 | rlen = NLMSG_ALIGN(nlh->nlmsg_len); |
@@ -1275,14 +1275,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1275 | ip_conntrack_expect_put(exp); | 1275 | ip_conntrack_expect_put(exp); |
1276 | return -ENOENT; | 1276 | return -ENOENT; |
1277 | } | 1277 | } |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | err = -ENOMEM; | 1280 | err = -ENOMEM; |
1281 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); | 1281 | skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); |
1282 | if (!skb2) | 1282 | if (!skb2) |
1283 | goto out; | 1283 | goto out; |
1284 | 1284 | ||
1285 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, | 1285 | err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid, |
1286 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, | 1286 | nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, |
1287 | 1, exp); | 1287 | 1, exp); |
1288 | if (err <= 0) | 1288 | if (err <= 0) |
@@ -1300,7 +1300,7 @@ out: | |||
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | static int | 1302 | static int |
1303 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | 1303 | ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, |
1304 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) | 1304 | struct nlmsghdr *nlh, struct nfattr *cda[], int *errp) |
1305 | { | 1305 | { |
1306 | struct ip_conntrack_expect *exp, *tmp; | 1306 | struct ip_conntrack_expect *exp, *tmp; |
@@ -1333,7 +1333,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1333 | 1333 | ||
1334 | /* after list removal, usage count == 1 */ | 1334 | /* after list removal, usage count == 1 */ |
1335 | ip_conntrack_unexpect_related(exp); | 1335 | ip_conntrack_unexpect_related(exp); |
1336 | /* have to put what we 'get' above. | 1336 | /* have to put what we 'get' above. |
1337 | * after this line usage count == 0 */ | 1337 | * after this line usage count == 0 */ |
1338 | ip_conntrack_expect_put(exp); | 1338 | ip_conntrack_expect_put(exp); |
1339 | } else if (cda[CTA_EXPECT_HELP_NAME-1]) { | 1339 | } else if (cda[CTA_EXPECT_HELP_NAME-1]) { |
@@ -1348,7 +1348,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, | |||
1348 | } | 1348 | } |
1349 | list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, | 1349 | list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, |
1350 | list) { | 1350 | list) { |
1351 | if (exp->master->helper == h | 1351 | if (exp->master->helper == h |
1352 | && del_timer(&exp->timeout)) { | 1352 | && del_timer(&exp->timeout)) { |
1353 | ip_ct_unlink_expect(exp); | 1353 | ip_ct_unlink_expect(exp); |
1354 | ip_conntrack_expect_put(exp); | 1354 | ip_conntrack_expect_put(exp); |
@@ -1413,7 +1413,7 @@ ctnetlink_create_expect(struct nfattr *cda[]) | |||
1413 | err = -ENOMEM; | 1413 | err = -ENOMEM; |
1414 | goto out; | 1414 | goto out; |
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | exp->expectfn = NULL; | 1417 | exp->expectfn = NULL; |
1418 | exp->flags = 0; | 1418 | exp->flags = 0; |
1419 | exp->master = ct; | 1419 | exp->master = ct; |
@@ -1423,7 +1423,7 @@ ctnetlink_create_expect(struct nfattr *cda[]) | |||
1423 | err = ip_conntrack_expect_related(exp); | 1423 | err = ip_conntrack_expect_related(exp); |
1424 | ip_conntrack_expect_put(exp); | 1424 | ip_conntrack_expect_put(exp); |
1425 | 1425 | ||
1426 | out: | 1426 | out: |
1427 | ip_conntrack_put(tuplehash_to_ctrack(h)); | 1427 | ip_conntrack_put(tuplehash_to_ctrack(h)); |
1428 | return err; | 1428 | return err; |
1429 | } | 1429 | } |
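The expectation paths above rely on get/put reference counting: each lookup takes a reference, ip_conntrack_expect_put drops it, and the comments track the usage count stepping down to zero before the object may go away. A compact userspace sketch of that discipline (types and names invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct expect { int refcnt; };                 /* illustrative object */

static struct expect *expect_alloc(void)
{
    struct expect *e = malloc(sizeof(*e));
    if (e)
        e->refcnt = 1;                         /* creator holds one reference */
    return e;
}

static void expect_get(struct expect *e) { e->refcnt++; }

static void expect_put(struct expect *e)
{
    if (--e->refcnt == 0) {                    /* last reference dropped */
        printf("freeing expectation\n");
        free(e);
    }
}

int main(void)
{
    struct expect *e = expect_alloc();
    expect_get(e);      /* e.g. a lookup hands out a second reference */
    expect_put(e);      /* lookup user done: refcnt back to 1 */
    expect_put(e);      /* list removal drops the last one: object freed */
    return 0;
}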
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c index 295b6fa340db..ec71abead00c 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c | |||
@@ -94,9 +94,9 @@ static int icmp_packet(struct ip_conntrack *ct, | |||
94 | enum ip_conntrack_info ctinfo) | 94 | enum ip_conntrack_info ctinfo) |
95 | { | 95 | { |
96 | /* Try to delete connection immediately after all replies: | 96 | /* Try to delete connection immediately after all replies: |
97 | won't actually vanish as we still have skb, and del_timer | 97 | won't actually vanish as we still have skb, and del_timer |
98 | means this will only run once even if count hits zero twice | 98 | means this will only run once even if count hits zero twice |
99 | (theoretically possible with SMP) */ | 99 | (theoretically possible with SMP) */ |
100 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { | 100 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { |
101 | if (atomic_dec_and_test(&ct->proto.icmp.count) | 101 | if (atomic_dec_and_test(&ct->proto.icmp.count) |
102 | && del_timer(&ct->timeout)) | 102 | && del_timer(&ct->timeout)) |
@@ -114,11 +114,11 @@ static int icmp_packet(struct ip_conntrack *ct, | |||
114 | static int icmp_new(struct ip_conntrack *conntrack, | 114 | static int icmp_new(struct ip_conntrack *conntrack, |
115 | const struct sk_buff *skb) | 115 | const struct sk_buff *skb) |
116 | { | 116 | { |
117 | static const u_int8_t valid_new[] = { | 117 | static const u_int8_t valid_new[] = { |
118 | [ICMP_ECHO] = 1, | 118 | [ICMP_ECHO] = 1, |
119 | [ICMP_TIMESTAMP] = 1, | 119 | [ICMP_TIMESTAMP] = 1, |
120 | [ICMP_INFO_REQUEST] = 1, | 120 | [ICMP_INFO_REQUEST] = 1, |
121 | [ICMP_ADDRESS] = 1 | 121 | [ICMP_ADDRESS] = 1 |
122 | }; | 122 | }; |
123 | 123 | ||
124 | if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) | 124 | if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) |
@@ -282,7 +282,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[], | |||
282 | || !tb[CTA_PROTO_ICMP_ID-1]) | 282 | || !tb[CTA_PROTO_ICMP_ID-1]) |
283 | return -EINVAL; | 283 | return -EINVAL; |
284 | 284 | ||
285 | tuple->dst.u.icmp.type = | 285 | tuple->dst.u.icmp.type = |
286 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); | 286 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); |
287 | tuple->dst.u.icmp.code = | 287 | tuple->dst.u.icmp.code = |
288 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); | 288 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); |
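icmp_new above only lets ICMP request types open a new conntrack entry, using a type-indexed lookup table. A standalone sketch of that check, with the standard RFC 792 type numbers defined locally so the example does not depend on kernel headers:

#include <stdio.h>

/* Standard ICMP type numbers (RFC 792). */
#define MY_ICMP_ECHO          8   /* echo request */
#define MY_ICMP_TIMESTAMP     13  /* timestamp request */
#define MY_ICMP_INFO_REQUEST  15  /* information request */
#define MY_ICMP_ADDRESS       17  /* address mask request */
#define MY_ICMP_DEST_UNREACH  3   /* an error type: must not create state */

/* Only request types may create new tracking state; everything else
 * (replies, errors) has to match an existing entry. */
static int icmp_may_create(unsigned char type)
{
    static const unsigned char valid_new[] = {
        [MY_ICMP_ECHO]         = 1,
        [MY_ICMP_TIMESTAMP]    = 1,
        [MY_ICMP_INFO_REQUEST] = 1,
        [MY_ICMP_ADDRESS]      = 1,
    };

    return type < sizeof(valid_new) && valid_new[type];
}

int main(void)
{
    printf("echo request:     %d\n", icmp_may_create(MY_ICMP_ECHO));         /* 1 */
    printf("dest unreachable: %d\n", icmp_may_create(MY_ICMP_DEST_UNREACH)); /* 0 */
    return 0;
}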
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c index 2443322e4128..9d5b917f49cd 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * Connection tracking protocol helper module for SCTP. | 2 | * Connection tracking protocol helper module for SCTP. |
3 | * | 3 | * |
4 | * SCTP is defined in RFC 2960. References to various sections in this code | 4 | * SCTP is defined in RFC 2960. References to various sections in this code |
5 | * are to this RFC. | 5 | * are to this RFC. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
@@ -38,7 +38,7 @@ | |||
38 | static DEFINE_RWLOCK(sctp_lock); | 38 | static DEFINE_RWLOCK(sctp_lock); |
39 | 39 | ||
40 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more | 40 | /* FIXME: Examine ipfilter's timeouts and conntrack transitions more |
41 | closely. They're more complex. --RR | 41 | closely. They're more complex. --RR |
42 | 42 | ||
43 | And so for me for SCTP :D -Kiran */ | 43 | And so for me for SCTP :D -Kiran */ |
44 | 44 | ||
@@ -87,32 +87,32 @@ static const unsigned int * sctp_timeouts[] | |||
87 | #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT | 87 | #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT |
88 | #define sIV SCTP_CONNTRACK_MAX | 88 | #define sIV SCTP_CONNTRACK_MAX |
89 | 89 | ||
90 | /* | 90 | /* |
91 | These are the descriptions of the states: | 91 | These are the descriptions of the states: |
92 | 92 | ||
93 | NOTE: These state names are tantalizingly similar to the states of an | 93 | NOTE: These state names are tantalizingly similar to the states of an |
94 | SCTP endpoint. But the interpretation of the states is a little different, | 94 | SCTP endpoint. But the interpretation of the states is a little different, |
95 | considering that these are the states of the connection and not of an end | 95 | considering that these are the states of the connection and not of an end |
96 | point. Please note the subtleties. -Kiran | 96 | point. Please note the subtleties. -Kiran |
97 | 97 | ||
98 | NONE - Nothing so far. | 98 | NONE - Nothing so far. |
99 | COOKIE WAIT - We have seen an INIT chunk in the original direction, or also | 99 | COOKIE WAIT - We have seen an INIT chunk in the original direction, or also |
100 | an INIT_ACK chunk in the reply direction. | 100 | an INIT_ACK chunk in the reply direction. |
101 | COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. | 101 | COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction. |
102 | ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. | 102 | ESTABLISHED - We have seen a COOKIE_ACK in the reply direction. |
103 | SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. | 103 | SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction. |
104 | SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply direction. | 104 | SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply direction. |
105 | SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite | 105 | SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite |
106 | to that of the SHUTDOWN chunk. | 106 | to that of the SHUTDOWN chunk. |
107 | CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of | 107 | CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of |
108 | the SHUTDOWN chunk. Connection is closed. | 108 | the SHUTDOWN chunk. Connection is closed. |
109 | */ | 109 | */ |
110 | 110 | ||
111 | /* TODO | 111 | /* TODO |
112 | - I have assumed that the first INIT is in the original direction. | 112 | - I have assumed that the first INIT is in the original direction. |
113 | This messes things up when an INIT comes in the reply direction in CLOSED | 113 | This messes things up when an INIT comes in the reply direction in CLOSED |
114 | state. | 114 | state. |
115 | - Check the error type in the reply dir before transitioning from | 115 | - Check the error type in the reply dir before transitioning from |
116 | cookie echoed to closed. | 116 | cookie echoed to closed. |
117 | - Sec 5.2.4 of RFC 2960 | 117 | - Sec 5.2.4 of RFC 2960 |
118 | - Multi Homing support. | 118 | - Multi Homing support. |
@@ -229,7 +229,7 @@ static int do_basic_checks(struct ip_conntrack *conntrack, | |||
229 | for_each_sctp_chunk (skb, sch, _sch, offset, count) { | 229 | for_each_sctp_chunk (skb, sch, _sch, offset, count) { |
230 | DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type); | 230 | DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type); |
231 | 231 | ||
232 | if (sch->type == SCTP_CID_INIT | 232 | if (sch->type == SCTP_CID_INIT |
233 | || sch->type == SCTP_CID_INIT_ACK | 233 | || sch->type == SCTP_CID_INIT_ACK |
234 | || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { | 234 | || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) { |
235 | flag = 1; | 235 | flag = 1; |
@@ -269,42 +269,42 @@ static int new_state(enum ip_conntrack_dir dir, | |||
269 | DEBUGP("Chunk type: %d\n", chunk_type); | 269 | DEBUGP("Chunk type: %d\n", chunk_type); |
270 | 270 | ||
271 | switch (chunk_type) { | 271 | switch (chunk_type) { |
272 | case SCTP_CID_INIT: | 272 | case SCTP_CID_INIT: |
273 | DEBUGP("SCTP_CID_INIT\n"); | 273 | DEBUGP("SCTP_CID_INIT\n"); |
274 | i = 0; break; | 274 | i = 0; break; |
275 | case SCTP_CID_INIT_ACK: | 275 | case SCTP_CID_INIT_ACK: |
276 | DEBUGP("SCTP_CID_INIT_ACK\n"); | 276 | DEBUGP("SCTP_CID_INIT_ACK\n"); |
277 | i = 1; break; | 277 | i = 1; break; |
278 | case SCTP_CID_ABORT: | 278 | case SCTP_CID_ABORT: |
279 | DEBUGP("SCTP_CID_ABORT\n"); | 279 | DEBUGP("SCTP_CID_ABORT\n"); |
280 | i = 2; break; | 280 | i = 2; break; |
281 | case SCTP_CID_SHUTDOWN: | 281 | case SCTP_CID_SHUTDOWN: |
282 | DEBUGP("SCTP_CID_SHUTDOWN\n"); | 282 | DEBUGP("SCTP_CID_SHUTDOWN\n"); |
283 | i = 3; break; | 283 | i = 3; break; |
284 | case SCTP_CID_SHUTDOWN_ACK: | 284 | case SCTP_CID_SHUTDOWN_ACK: |
285 | DEBUGP("SCTP_CID_SHUTDOWN_ACK\n"); | 285 | DEBUGP("SCTP_CID_SHUTDOWN_ACK\n"); |
286 | i = 4; break; | 286 | i = 4; break; |
287 | case SCTP_CID_ERROR: | 287 | case SCTP_CID_ERROR: |
288 | DEBUGP("SCTP_CID_ERROR\n"); | 288 | DEBUGP("SCTP_CID_ERROR\n"); |
289 | i = 5; break; | 289 | i = 5; break; |
290 | case SCTP_CID_COOKIE_ECHO: | 290 | case SCTP_CID_COOKIE_ECHO: |
291 | DEBUGP("SCTP_CID_COOKIE_ECHO\n"); | 291 | DEBUGP("SCTP_CID_COOKIE_ECHO\n"); |
292 | i = 6; break; | 292 | i = 6; break; |
293 | case SCTP_CID_COOKIE_ACK: | 293 | case SCTP_CID_COOKIE_ACK: |
294 | DEBUGP("SCTP_CID_COOKIE_ACK\n"); | 294 | DEBUGP("SCTP_CID_COOKIE_ACK\n"); |
295 | i = 7; break; | 295 | i = 7; break; |
296 | case SCTP_CID_SHUTDOWN_COMPLETE: | 296 | case SCTP_CID_SHUTDOWN_COMPLETE: |
297 | DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n"); | 297 | DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n"); |
298 | i = 8; break; | 298 | i = 8; break; |
299 | default: | 299 | default: |
300 | /* Other chunks like DATA, SACK, HEARTBEAT and | 300 | /* Other chunks like DATA, SACK, HEARTBEAT and |
301 | its ACK do not cause a change in state */ | 301 | its ACK do not cause a change in state */ |
302 | DEBUGP("Unknown chunk type, Will stay in %s\n", | 302 | DEBUGP("Unknown chunk type, Will stay in %s\n", |
303 | sctp_conntrack_names[cur_state]); | 303 | sctp_conntrack_names[cur_state]); |
304 | return cur_state; | 304 | return cur_state; |
305 | } | 305 | } |
306 | 306 | ||
307 | DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", | 307 | DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n", |
308 | dir, sctp_conntrack_names[cur_state], chunk_type, | 308 | dir, sctp_conntrack_names[cur_state], chunk_type, |
309 | sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); | 309 | sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]); |
310 | 310 | ||
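new_state above maps each chunk type to a small index i and then reads the next state from a table indexed as sctp_conntracks[dir][i][cur_state]. A reduced userspace sketch of that table-driven lookup (only a few states and chunk kinds, with invented transitions, just to show the shape):

#include <stdio.h>

enum dir   { DIR_ORIG = 0, DIR_REPLY = 1 };
enum state { ST_NONE, ST_COOKIE_WAIT, ST_ESTABLISHED, ST_CLOSED, ST_MAX };
enum chunk { CH_INIT, CH_INIT_ACK, CH_COOKIE_ACK, CH_SHUTDOWN_COMPLETE, CH_MAX };

/* next_state[dir][chunk][current] - a toy subset of the real table. */
static const enum state next_state[2][CH_MAX][ST_MAX] = {
    [DIR_ORIG] = {
        [CH_INIT]              = { ST_COOKIE_WAIT, ST_COOKIE_WAIT, ST_ESTABLISHED, ST_COOKIE_WAIT },
        [CH_SHUTDOWN_COMPLETE] = { ST_CLOSED,      ST_CLOSED,      ST_CLOSED,      ST_CLOSED      },
    },
    [DIR_REPLY] = {
        [CH_INIT_ACK]   = { ST_NONE, ST_COOKIE_WAIT, ST_ESTABLISHED, ST_CLOSED },
        [CH_COOKIE_ACK] = { ST_NONE, ST_ESTABLISHED, ST_ESTABLISHED, ST_CLOSED },
    },
};

int main(void)
{
    enum state cur = ST_NONE;

    cur = next_state[DIR_ORIG][CH_INIT][cur];        /* NONE -> COOKIE_WAIT */
    cur = next_state[DIR_REPLY][CH_COOKIE_ACK][cur]; /* COOKIE_WAIT -> ESTABLISHED */
    printf("final state: %d\n", cur);                /* 2 == ST_ESTABLISHED */
    return 0;
}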
@@ -367,7 +367,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, | |||
367 | /* Sec 8.5.1 (C) */ | 367 | /* Sec 8.5.1 (C) */ |
368 | if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) | 368 | if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)]) |
369 | && !(sh->vtag == conntrack->proto.sctp.vtag | 369 | && !(sh->vtag == conntrack->proto.sctp.vtag |
370 | [1 - CTINFO2DIR(ctinfo)] | 370 | [1 - CTINFO2DIR(ctinfo)] |
371 | && (sch->flags & 1))) { | 371 | && (sch->flags & 1))) { |
372 | write_unlock_bh(&sctp_lock); | 372 | write_unlock_bh(&sctp_lock); |
373 | return -1; | 373 | return -1; |
@@ -392,17 +392,17 @@ static int sctp_packet(struct ip_conntrack *conntrack, | |||
392 | } | 392 | } |
393 | 393 | ||
394 | /* If it is an INIT or an INIT ACK note down the vtag */ | 394 | /* If it is an INIT or an INIT ACK note down the vtag */ |
395 | if (sch->type == SCTP_CID_INIT | 395 | if (sch->type == SCTP_CID_INIT |
396 | || sch->type == SCTP_CID_INIT_ACK) { | 396 | || sch->type == SCTP_CID_INIT_ACK) { |
397 | sctp_inithdr_t _inithdr, *ih; | 397 | sctp_inithdr_t _inithdr, *ih; |
398 | 398 | ||
399 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), | 399 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), |
400 | sizeof(_inithdr), &_inithdr); | 400 | sizeof(_inithdr), &_inithdr); |
401 | if (ih == NULL) { | 401 | if (ih == NULL) { |
402 | write_unlock_bh(&sctp_lock); | 402 | write_unlock_bh(&sctp_lock); |
403 | return -1; | 403 | return -1; |
404 | } | 404 | } |
405 | DEBUGP("Setting vtag %x for dir %d\n", | 405 | DEBUGP("Setting vtag %x for dir %d\n", |
406 | ih->init_tag, !CTINFO2DIR(ctinfo)); | 406 | ih->init_tag, !CTINFO2DIR(ctinfo)); |
407 | conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag; | 407 | conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag; |
408 | } | 408 | } |
@@ -427,7 +427,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, | |||
427 | } | 427 | } |
428 | 428 | ||
429 | /* Called when a new connection for this protocol is found. */ | 429 | /* Called when a new connection for this protocol is found. */ |
430 | static int sctp_new(struct ip_conntrack *conntrack, | 430 | static int sctp_new(struct ip_conntrack *conntrack, |
431 | const struct sk_buff *skb) | 431 | const struct sk_buff *skb) |
432 | { | 432 | { |
433 | enum sctp_conntrack newconntrack; | 433 | enum sctp_conntrack newconntrack; |
@@ -457,7 +457,7 @@ static int sctp_new(struct ip_conntrack *conntrack, | |||
457 | newconntrack = SCTP_CONNTRACK_MAX; | 457 | newconntrack = SCTP_CONNTRACK_MAX; |
458 | for_each_sctp_chunk (skb, sch, _sch, offset, count) { | 458 | for_each_sctp_chunk (skb, sch, _sch, offset, count) { |
459 | /* Don't need lock here: this conntrack is not in circulation yet */ | 459 | /* Don't need lock here: this conntrack is not in circulation yet */ |
460 | newconntrack = new_state (IP_CT_DIR_ORIGINAL, | 460 | newconntrack = new_state (IP_CT_DIR_ORIGINAL, |
461 | SCTP_CONNTRACK_NONE, sch->type); | 461 | SCTP_CONNTRACK_NONE, sch->type); |
462 | 462 | ||
463 | /* Invalid: delete conntrack */ | 463 | /* Invalid: delete conntrack */ |
@@ -472,14 +472,14 @@ static int sctp_new(struct ip_conntrack *conntrack, | |||
472 | sctp_inithdr_t _inithdr, *ih; | 472 | sctp_inithdr_t _inithdr, *ih; |
473 | 473 | ||
474 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), | 474 | ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t), |
475 | sizeof(_inithdr), &_inithdr); | 475 | sizeof(_inithdr), &_inithdr); |
476 | if (ih == NULL) | 476 | if (ih == NULL) |
477 | return 0; | 477 | return 0; |
478 | 478 | ||
479 | DEBUGP("Setting vtag %x for new conn\n", | 479 | DEBUGP("Setting vtag %x for new conn\n", |
480 | ih->init_tag); | 480 | ih->init_tag); |
481 | 481 | ||
482 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = | 482 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = |
483 | ih->init_tag; | 483 | ih->init_tag; |
484 | } else { | 484 | } else { |
485 | /* Sec 8.5.1 (A) */ | 485 | /* Sec 8.5.1 (A) */ |
@@ -489,7 +489,7 @@ static int sctp_new(struct ip_conntrack *conntrack, | |||
489 | /* If it is a shutdown ack OOTB packet, we expect a return | 489 | /* If it is a shutdown ack OOTB packet, we expect a return |
490 | shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ | 490 | shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ |
491 | else { | 491 | else { |
492 | DEBUGP("Setting vtag %x for new conn OOTB\n", | 492 | DEBUGP("Setting vtag %x for new conn OOTB\n", |
493 | sh->vtag); | 493 | sh->vtag); |
494 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; | 494 | conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; |
495 | } | 495 | } |
@@ -500,16 +500,16 @@ static int sctp_new(struct ip_conntrack *conntrack, | |||
500 | return 1; | 500 | return 1; |
501 | } | 501 | } |
502 | 502 | ||
503 | static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = { | 503 | static struct ip_conntrack_protocol ip_conntrack_protocol_sctp = { |
504 | .proto = IPPROTO_SCTP, | 504 | .proto = IPPROTO_SCTP, |
505 | .name = "sctp", | 505 | .name = "sctp", |
506 | .pkt_to_tuple = sctp_pkt_to_tuple, | 506 | .pkt_to_tuple = sctp_pkt_to_tuple, |
507 | .invert_tuple = sctp_invert_tuple, | 507 | .invert_tuple = sctp_invert_tuple, |
508 | .print_tuple = sctp_print_tuple, | 508 | .print_tuple = sctp_print_tuple, |
509 | .print_conntrack = sctp_print_conntrack, | 509 | .print_conntrack = sctp_print_conntrack, |
510 | .packet = sctp_packet, | 510 | .packet = sctp_packet, |
511 | .new = sctp_new, | 511 | .new = sctp_new, |
512 | .destroy = NULL, | 512 | .destroy = NULL, |
513 | .me = THIS_MODULE, | 513 | .me = THIS_MODULE, |
514 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 514 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ |
515 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | 515 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) |
@@ -603,7 +603,7 @@ static ctl_table ip_ct_net_table[] = { | |||
603 | { | 603 | { |
604 | .ctl_name = CTL_NET, | 604 | .ctl_name = CTL_NET, |
605 | .procname = "net", | 605 | .procname = "net", |
606 | .mode = 0555, | 606 | .mode = 0555, |
607 | .child = ip_ct_ipv4_table, | 607 | .child = ip_ct_ipv4_table, |
608 | }, | 608 | }, |
609 | { .ctl_name = 0 } | 609 | { .ctl_name = 0 } |
@@ -638,7 +638,7 @@ static int __init ip_conntrack_proto_sctp_init(void) | |||
638 | ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); | 638 | ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); |
639 | #endif | 639 | #endif |
640 | out: | 640 | out: |
641 | DEBUGP("SCTP conntrack module loading %s\n", | 641 | DEBUGP("SCTP conntrack module loading %s\n", |
642 | ret ? "failed": "succeeded"); | 642 | ret ? "failed": "succeeded"); |
643 | return ret; | 643 | return ret; |
644 | } | 644 | } |
@@ -647,7 +647,7 @@ static void __exit ip_conntrack_proto_sctp_fini(void) | |||
647 | { | 647 | { |
648 | ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); | 648 | ip_conntrack_protocol_unregister(&ip_conntrack_protocol_sctp); |
649 | #ifdef CONFIG_SYSCTL | 649 | #ifdef CONFIG_SYSCTL |
650 | unregister_sysctl_table(ip_ct_sysctl_header); | 650 | unregister_sysctl_table(ip_ct_sysctl_header); |
651 | #endif | 651 | #endif |
652 | DEBUGP("SCTP conntrack module unloaded\n"); | 652 | DEBUGP("SCTP conntrack module unloaded\n"); |
653 | } | 653 | } |
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index c34f48fe5478..fa35b49fe2fa 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c | |||
@@ -45,8 +45,8 @@ | |||
45 | /* Protects conntrack->proto.tcp */ | 45 | /* Protects conntrack->proto.tcp */ |
46 | static DEFINE_RWLOCK(tcp_lock); | 46 | static DEFINE_RWLOCK(tcp_lock); |
47 | 47 | ||
48 | /* "Be conservative in what you do, | 48 | /* "Be conservative in what you do, |
49 | be liberal in what you accept from others." | 49 | be liberal in what you accept from others." |
50 | If it's non-zero, we mark only out of window RST segments as INVALID. */ | 50 | If it's non-zero, we mark only out of window RST segments as INVALID. */ |
51 | int ip_ct_tcp_be_liberal __read_mostly = 0; | 51 | int ip_ct_tcp_be_liberal __read_mostly = 0; |
52 | 52 | ||
@@ -54,8 +54,8 @@ int ip_ct_tcp_be_liberal __read_mostly = 0; | |||
54 | connections. */ | 54 | connections. */ |
55 | int ip_ct_tcp_loose __read_mostly = 1; | 55 | int ip_ct_tcp_loose __read_mostly = 1; |
56 | 56 | ||
57 | /* Max number of retransmitted packets without receiving an (acceptable) | 57 | /* Max number of retransmitted packets without receiving an (acceptable) |
58 | ACK from the destination. If this number is reached, a shorter timer | 58 | ACK from the destination. If this number is reached, a shorter timer |
59 | will be started. */ | 59 | will be started. */ |
60 | int ip_ct_tcp_max_retrans __read_mostly = 3; | 60 | int ip_ct_tcp_max_retrans __read_mostly = 3; |
61 | 61 | ||
@@ -74,7 +74,7 @@ static const char *tcp_conntrack_names[] = { | |||
74 | "CLOSE", | 74 | "CLOSE", |
75 | "LISTEN" | 75 | "LISTEN" |
76 | }; | 76 | }; |
77 | 77 | ||
78 | #define SECS * HZ | 78 | #define SECS * HZ |
79 | #define MINS * 60 SECS | 79 | #define MINS * 60 SECS |
80 | #define HOURS * 60 MINS | 80 | #define HOURS * 60 MINS |
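The timeout values below are written with the SECS/MINS/HOURS macros above, which expand into chained multiplications, so 2 MINS becomes 2 * 60 * HZ. A tiny standalone demonstration (HZ is given an arbitrary placeholder value, not the kernel's configured tick rate):

#include <stdio.h>

#define HZ    250          /* placeholder tick rate for the demo */
#define SECS  * HZ
#define MINS  * 60 SECS
#define HOURS * 60 MINS

int main(void)
{
    /* "10 SECS" expands to "10 * 250", "2 MINS" to "2 * 60 * 250", ... */
    printf("10 SECS = %d jiffies\n", 10 SECS);   /* 2500 */
    printf("2 MINS  = %d jiffies\n", 2 MINS);    /* 30000 */
    printf("5 HOURS = %d jiffies\n", 5 HOURS);   /* 4500000 */
    return 0;
}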
@@ -90,10 +90,10 @@ unsigned int ip_ct_tcp_timeout_time_wait __read_mostly = 2 MINS; | |||
90 | unsigned int ip_ct_tcp_timeout_close __read_mostly = 10 SECS; | 90 | unsigned int ip_ct_tcp_timeout_close __read_mostly = 10 SECS; |
91 | 91 | ||
92 | /* RFC1122 says the R2 limit should be at least 100 seconds. | 92 | /* RFC1122 says the R2 limit should be at least 100 seconds. |
93 | Linux uses 15 packets as the limit, which corresponds | 93 | Linux uses 15 packets as the limit, which corresponds |
94 | to ~13-30min depending on RTO. */ | 94 | to ~13-30min depending on RTO. */ |
95 | unsigned int ip_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; | 95 | unsigned int ip_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS; |
96 | 96 | ||
97 | static const unsigned int * tcp_timeouts[] | 97 | static const unsigned int * tcp_timeouts[] |
98 | = { NULL, /* TCP_CONNTRACK_NONE */ | 98 | = { NULL, /* TCP_CONNTRACK_NONE */ |
99 | &ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ | 99 | &ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */ |
@@ -106,7 +106,7 @@ static const unsigned int * tcp_timeouts[] | |||
106 | &ip_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */ | 106 | &ip_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */ |
107 | NULL, /* TCP_CONNTRACK_LISTEN */ | 107 | NULL, /* TCP_CONNTRACK_LISTEN */ |
108 | }; | 108 | }; |
109 | 109 | ||
110 | #define sNO TCP_CONNTRACK_NONE | 110 | #define sNO TCP_CONNTRACK_NONE |
111 | #define sSS TCP_CONNTRACK_SYN_SENT | 111 | #define sSS TCP_CONNTRACK_SYN_SENT |
112 | #define sSR TCP_CONNTRACK_SYN_RECV | 112 | #define sSR TCP_CONNTRACK_SYN_RECV |
@@ -129,13 +129,13 @@ enum tcp_bit_set { | |||
129 | TCP_RST_SET, | 129 | TCP_RST_SET, |
130 | TCP_NONE_SET, | 130 | TCP_NONE_SET, |
131 | }; | 131 | }; |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * The TCP state transition table needs a few words... | 134 | * The TCP state transition table needs a few words... |
135 | * | 135 | * |
136 | * We are the man in the middle. All the packets go through us | 136 | * We are the man in the middle. All the packets go through us |
137 | * but might get lost in transit to the destination. | 137 | * but might get lost in transit to the destination. |
138 | * It is assumed that the destinations can't receive segments | 138 | * It is assumed that the destinations can't receive segments |
139 | * we haven't seen. | 139 | * we haven't seen. |
140 | * | 140 | * |
141 | * The checked segment is in window, but our windows are *not* | 141 | * The checked segment is in window, but our windows are *not* |
@@ -145,11 +145,11 @@ enum tcp_bit_set { | |||
145 | * The meaning of the states are: | 145 | * The meaning of the states are: |
146 | * | 146 | * |
147 | * NONE: initial state | 147 | * NONE: initial state |
148 | * SYN_SENT: SYN-only packet seen | 148 | * SYN_SENT: SYN-only packet seen |
149 | * SYN_RECV: SYN-ACK packet seen | 149 | * SYN_RECV: SYN-ACK packet seen |
150 | * ESTABLISHED: ACK packet seen | 150 | * ESTABLISHED: ACK packet seen |
151 | * FIN_WAIT: FIN packet seen | 151 | * FIN_WAIT: FIN packet seen |
152 | * CLOSE_WAIT: ACK seen (after FIN) | 152 | * CLOSE_WAIT: ACK seen (after FIN) |
153 | * LAST_ACK: FIN seen (after FIN) | 153 | * LAST_ACK: FIN seen (after FIN) |
154 | * TIME_WAIT: last ACK seen | 154 | * TIME_WAIT: last ACK seen |
155 | * CLOSE: closed connection | 155 | * CLOSE: closed connection |
@@ -157,8 +157,8 @@ enum tcp_bit_set { | |||
157 | * LISTEN state is not used. | 157 | * LISTEN state is not used. |
158 | * | 158 | * |
159 | * Packets marked as IGNORED (sIG): | 159 | * Packets marked as IGNORED (sIG): |
160 | * if they may be either invalid or valid | 160 | * if they may be either invalid or valid |
161 | * and the receiver may send back a connection | 161 | * and the receiver may send back a connection |
162 | * closing RST or a SYN/ACK. | 162 | * closing RST or a SYN/ACK. |
163 | * | 163 | * |
164 | * Packets marked as INVALID (sIV): | 164 | * Packets marked as INVALID (sIV): |
@@ -175,7 +175,7 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
175 | * sSS -> sSS Retransmitted SYN | 175 | * sSS -> sSS Retransmitted SYN |
176 | * sSR -> sIG Late retransmitted SYN? | 176 | * sSR -> sIG Late retransmitted SYN? |
177 | * sES -> sIG Error: SYNs in window outside the SYN_SENT state | 177 | * sES -> sIG Error: SYNs in window outside the SYN_SENT state |
178 | * are errors. Receiver will reply with RST | 178 | * are errors. Receiver will reply with RST |
179 | * and close the connection. | 179 | * and close the connection. |
180 | * Or we are not in sync and hold a dead connection. | 180 | * Or we are not in sync and hold a dead connection. |
181 | * sFW -> sIG | 181 | * sFW -> sIG |
@@ -188,10 +188,10 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
188 | /*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, | 188 | /*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }, |
189 | /* | 189 | /* |
190 | * A SYN/ACK from the client is always invalid: | 190 | * A SYN/ACK from the client is always invalid: |
191 | * - either it tries to set up a simultaneous open, which is | 191 | * - either it tries to set up a simultaneous open, which is |
192 | * not supported; | 192 | * not supported; |
193 | * - or the firewall has just been inserted between the two hosts | 193 | * - or the firewall has just been inserted between the two hosts |
194 | * during the session set-up. The SYN will be retransmitted | 194 | * during the session set-up. The SYN will be retransmitted |
195 | * by the true client (or it'll time out). | 195 | * by the true client (or it'll time out). |
196 | */ | 196 | */ |
197 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ | 197 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ |
@@ -201,9 +201,9 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
201 | * sSS -> sIV Client might not send FIN in this state: | 201 | * sSS -> sIV Client might not send FIN in this state: |
202 | * we enforce waiting for a SYN/ACK reply first. | 202 | * we enforce waiting for a SYN/ACK reply first. |
203 | * sSR -> sFW Close started. | 203 | * sSR -> sFW Close started. |
204 | * sES -> sFW | 204 | * sES -> sFW |
205 | * sFW -> sLA FIN seen in both directions, waiting for | 205 | * sFW -> sLA FIN seen in both directions, waiting for |
206 | * the last ACK. | 206 | * the last ACK. |
207 | * Might be a retransmitted FIN as well... | 207 | * Might be a retransmitted FIN as well... |
208 | * sCW -> sLA | 208 | * sCW -> sLA |
209 | * sLA -> sLA Retransmitted FIN. Remain in the same state. | 209 | * sLA -> sLA Retransmitted FIN. Remain in the same state. |
@@ -281,7 +281,7 @@ static const enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
281 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ | 281 | /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */ |
282 | /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, | 282 | /*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV }, |
283 | /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } | 283 | /*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV } |
284 | } | 284 | } |
285 | }; | 285 | }; |
286 | 286 | ||
287 | static int tcp_pkt_to_tuple(const struct sk_buff *skb, | 287 | static int tcp_pkt_to_tuple(const struct sk_buff *skb, |
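The tcp_conntracks table above is read as tcp_conntracks[direction][segment type][current state], where the segment type index reflects which of SYN/SYN-ACK/FIN/ACK/RST is set, as get_conntrack_index() does. A cut-down userspace sketch of that two-step lookup (the classification mirrors the real ordering; the table entries are invented):

#include <stdio.h>

enum seg { SEG_SYN, SEG_SYNACK, SEG_FIN, SEG_ACK, SEG_RST, SEG_NONE, SEG_MAX };
enum st  { S_NONE, S_SYN_SENT, S_SYN_RECV, S_ESTABLISHED, S_CLOSE, S_MAX };

/* Classify a segment the way get_conntrack_index() does: RST wins,
 * then SYN (with or without ACK), then FIN, then bare ACK. */
static enum seg classify(int syn, int ack, int fin, int rst)
{
    if (rst) return SEG_RST;
    if (syn) return ack ? SEG_SYNACK : SEG_SYN;
    if (fin) return SEG_FIN;
    if (ack) return SEG_ACK;
    return SEG_NONE;
}

/* toy_table[dir][seg][cur] - a tiny invented subset of the real table. */
static const enum st toy_table[2][SEG_MAX][S_MAX] = {
    /* original direction */
    [0] = {
        [SEG_SYN] = { S_SYN_SENT, S_SYN_SENT, S_SYN_SENT,    S_ESTABLISHED, S_SYN_SENT },
        [SEG_ACK] = { S_NONE,     S_SYN_SENT, S_ESTABLISHED, S_ESTABLISHED, S_CLOSE    },
    },
    /* reply direction */
    [1] = {
        [SEG_SYNACK] = { S_NONE, S_SYN_RECV, S_SYN_RECV, S_ESTABLISHED, S_CLOSE },
        [SEG_RST]    = { S_NONE, S_CLOSE,    S_CLOSE,    S_CLOSE,       S_CLOSE },
    },
};

int main(void)
{
    enum st cur = S_NONE;

    cur = toy_table[0][classify(1, 0, 0, 0)][cur];  /* client SYN      -> SYN_SENT    */
    cur = toy_table[1][classify(1, 1, 0, 0)][cur];  /* server SYN/ACK  -> SYN_RECV    */
    cur = toy_table[0][classify(0, 1, 0, 0)][cur];  /* client ACK      -> ESTABLISHED */
    printf("state after handshake: %d\n", cur);     /* 3 == S_ESTABLISHED */
    return 0;
}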
@@ -337,7 +337,7 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, | |||
337 | const struct ip_conntrack *ct) | 337 | const struct ip_conntrack *ct) |
338 | { | 338 | { |
339 | struct nfattr *nest_parms; | 339 | struct nfattr *nest_parms; |
340 | 340 | ||
341 | read_lock_bh(&tcp_lock); | 341 | read_lock_bh(&tcp_lock); |
342 | nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); | 342 | nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); |
343 | NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), | 343 | NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), |
@@ -367,7 +367,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct) | |||
367 | if (!attr) | 367 | if (!attr) |
368 | return 0; | 368 | return 0; |
369 | 369 | ||
370 | nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); | 370 | nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); |
371 | 371 | ||
372 | if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) | 372 | if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) |
373 | return -EINVAL; | 373 | return -EINVAL; |
@@ -376,7 +376,7 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct) | |||
376 | return -EINVAL; | 376 | return -EINVAL; |
377 | 377 | ||
378 | write_lock_bh(&tcp_lock); | 378 | write_lock_bh(&tcp_lock); |
379 | ct->proto.tcp.state = | 379 | ct->proto.tcp.state = |
380 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); | 380 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); |
381 | write_unlock_bh(&tcp_lock); | 381 | write_unlock_bh(&tcp_lock); |
382 | 382 | ||
@@ -395,30 +395,30 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph) | |||
395 | 395 | ||
396 | /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering | 396 | /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering |
397 | in IP Filter' by Guido van Rooij. | 397 | in IP Filter' by Guido van Rooij. |
398 | 398 | ||
399 | http://www.nluug.nl/events/sane2000/papers.html | 399 | http://www.nluug.nl/events/sane2000/papers.html |
400 | http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz | 400 | http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz |
401 | 401 | ||
402 | The boundaries and the conditions are changed according to RFC793: | 402 | The boundaries and the conditions are changed according to RFC793: |
403 | the packet must intersect the window (i.e. segments may be | 403 | the packet must intersect the window (i.e. segments may be |
404 | after the right or before the left edge) and thus receivers may ACK | 404 | after the right or before the left edge) and thus receivers may ACK |
405 | segments after the right edge of the window. | 405 | segments after the right edge of the window. |
406 | 406 | ||
407 | td_maxend = max(sack + max(win,1)) seen in reply packets | 407 | td_maxend = max(sack + max(win,1)) seen in reply packets |
408 | td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets | 408 | td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets |
409 | td_maxwin += seq + len - sender.td_maxend | 409 | td_maxwin += seq + len - sender.td_maxend |
410 | if seq + len > sender.td_maxend | 410 | if seq + len > sender.td_maxend |
411 | td_end = max(seq + len) seen in sent packets | 411 | td_end = max(seq + len) seen in sent packets |
412 | 412 | ||
413 | I. Upper bound for valid data: seq <= sender.td_maxend | 413 | I. Upper bound for valid data: seq <= sender.td_maxend |
414 | II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin | 414 | II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin |
415 | III. Upper bound for valid ack: sack <= receiver.td_end | 415 | III. Upper bound for valid ack: sack <= receiver.td_end |
416 | IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW | 416 | IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW |
417 | 417 | ||
418 | where sack is the highest right edge of sack block found in the packet. | 418 | where sack is the highest right edge of sack block found in the packet. |
419 | 419 | ||
420 | The upper bound for a valid ack is not ignored - | 420 | The upper bound for a valid ack is not ignored - |
421 | we don't have to deal with fragments. | 421 | we don't have to deal with fragments. |
422 | */ | 422 | */ |
423 | 423 | ||
424 | static inline __u32 segment_seq_plus_len(__u32 seq, | 424 | static inline __u32 segment_seq_plus_len(__u32 seq, |
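Conditions I-IV above boil down to four wrap-safe sequence-number comparisons against the per-direction td_end/td_maxend/td_maxwin values. A self-contained sketch of that check (struct, helpers and sample numbers are illustrative; only the shape of the four comparisons mirrors the code):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe sequence comparisons, same idea as the kernel's before()/after(). */
static int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static int seq_after(uint32_t a, uint32_t b)  { return (int32_t)(b - a) < 0; }

struct half_state {           /* per-direction tracking data, as in the text above */
    uint32_t td_end;          /* max(seq + len) seen from this side */
    uint32_t td_maxend;       /* max(sack + max(win,1)) seen in replies */
    uint32_t td_maxwin;       /* largest window offered */
};

#define MAXACKWINCONST 66000
#define MAXACKWINDOW(s) ((s)->td_maxwin > MAXACKWINCONST ? (s)->td_maxwin : MAXACKWINCONST)

/* Conditions I-IV from the comment above: the segment [seq, end] must
 * intersect the advertised window and the (s)ack must refer to data the
 * receiver could actually have sent. */
static int in_window(const struct half_state *snd, const struct half_state *rcv,
                     uint32_t seq, uint32_t end, uint32_t sack, uint32_t ack)
{
    return seq_before(seq, snd->td_maxend + 1) &&                  /* I   */
           seq_after(end, snd->td_end - rcv->td_maxwin - 1) &&     /* II  */
           seq_before(sack, rcv->td_end + 1) &&                    /* III */
           seq_after(ack, rcv->td_end - MAXACKWINDOW(snd));        /* IV  */
}

int main(void)
{
    struct half_state snd = { .td_end = 2000, .td_maxend = 3000, .td_maxwin = 1000 };
    struct half_state rcv = { .td_end = 5000, .td_maxend = 6000, .td_maxwin = 1000 };

    /* A segment just inside the window with a plausible ACK. */
    printf("ok:  %d\n", in_window(&snd, &rcv, 2100, 2300, 5000, 5000));
    /* SEQ far beyond the receiver's window (violates condition I). */
    printf("bad: %d\n", in_window(&snd, &rcv, 9000, 9100, 5000, 5000));
    return 0;
}

In the real code the same per-direction values are then updated from every accepted packet, which is what the surrounding hunks do.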
@@ -429,25 +429,25 @@ static inline __u32 segment_seq_plus_len(__u32 seq, | |||
429 | return (seq + len - (iph->ihl + tcph->doff)*4 | 429 | return (seq + len - (iph->ihl + tcph->doff)*4 |
430 | + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); | 430 | + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0)); |
431 | } | 431 | } |
432 | 432 | ||
433 | /* Fixme: what about big packets? */ | 433 | /* Fixme: what about big packets? */ |
434 | #define MAXACKWINCONST 66000 | 434 | #define MAXACKWINCONST 66000 |
435 | #define MAXACKWINDOW(sender) \ | 435 | #define MAXACKWINDOW(sender) \ |
436 | ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \ | 436 | ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \ |
437 | : MAXACKWINCONST) | 437 | : MAXACKWINCONST) |
438 | 438 | ||
439 | /* | 439 | /* |
440 | * Simplified tcp_parse_options routine from tcp_input.c | 440 | * Simplified tcp_parse_options routine from tcp_input.c |
441 | */ | 441 | */ |
442 | static void tcp_options(const struct sk_buff *skb, | 442 | static void tcp_options(const struct sk_buff *skb, |
443 | struct iphdr *iph, | 443 | struct iphdr *iph, |
444 | struct tcphdr *tcph, | 444 | struct tcphdr *tcph, |
445 | struct ip_ct_tcp_state *state) | 445 | struct ip_ct_tcp_state *state) |
446 | { | 446 | { |
447 | unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; | 447 | unsigned char buff[(15 * 4) - sizeof(struct tcphdr)]; |
448 | unsigned char *ptr; | 448 | unsigned char *ptr; |
449 | int length = (tcph->doff*4) - sizeof(struct tcphdr); | 449 | int length = (tcph->doff*4) - sizeof(struct tcphdr); |
450 | 450 | ||
451 | if (!length) | 451 | if (!length) |
452 | return; | 452 | return; |
453 | 453 | ||
@@ -456,13 +456,13 @@ static void tcp_options(const struct sk_buff *skb, | |||
456 | length, buff); | 456 | length, buff); |
457 | BUG_ON(ptr == NULL); | 457 | BUG_ON(ptr == NULL); |
458 | 458 | ||
459 | state->td_scale = | 459 | state->td_scale = |
460 | state->flags = 0; | 460 | state->flags = 0; |
461 | 461 | ||
462 | while (length > 0) { | 462 | while (length > 0) { |
463 | int opcode=*ptr++; | 463 | int opcode=*ptr++; |
464 | int opsize; | 464 | int opsize; |
465 | 465 | ||
466 | switch (opcode) { | 466 | switch (opcode) { |
467 | case TCPOPT_EOL: | 467 | case TCPOPT_EOL: |
468 | return; | 468 | return; |
@@ -476,13 +476,13 @@ static void tcp_options(const struct sk_buff *skb, | |||
476 | if (opsize > length) | 476 | if (opsize > length) |
477 | break; /* don't parse partial options */ | 477 | break; /* don't parse partial options */ |
478 | 478 | ||
479 | if (opcode == TCPOPT_SACK_PERM | 479 | if (opcode == TCPOPT_SACK_PERM |
480 | && opsize == TCPOLEN_SACK_PERM) | 480 | && opsize == TCPOLEN_SACK_PERM) |
481 | state->flags |= IP_CT_TCP_FLAG_SACK_PERM; | 481 | state->flags |= IP_CT_TCP_FLAG_SACK_PERM; |
482 | else if (opcode == TCPOPT_WINDOW | 482 | else if (opcode == TCPOPT_WINDOW |
483 | && opsize == TCPOLEN_WINDOW) { | 483 | && opsize == TCPOLEN_WINDOW) { |
484 | state->td_scale = *(u_int8_t *)ptr; | 484 | state->td_scale = *(u_int8_t *)ptr; |
485 | 485 | ||
486 | if (state->td_scale > 14) { | 486 | if (state->td_scale > 14) { |
487 | /* See RFC1323 */ | 487 | /* See RFC1323 */ |
488 | state->td_scale = 14; | 488 | state->td_scale = 14; |
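tcp_options above walks the option block as a kind/length sequence: EOL ends the scan, NOP is a one-byte filler, and a window-scale value above 14 is clamped as RFC 1323 requires. A standalone sketch of such a walker (the option numbers are the standard IANA values; the function and sample buffer are invented):

#include <stdio.h>

/* Standard TCP option numbers (RFC 793 / RFC 1323 / RFC 2018). */
#define TCPOPT_EOL        0
#define TCPOPT_NOP        1
#define TCPOPT_WINDOW     3
#define TCPOLEN_WINDOW    3
#define TCPOPT_SACK_PERM  4
#define TCPOLEN_SACK_PERM 2

/* Walk a TCP options block the same way tcp_options() above does:
 * EOL stops the scan, NOP is a one-byte filler, everything else is
 * encoded as <kind><length><data>, and a truncated option ends the scan. */
static void parse_options(const unsigned char *ptr, int length,
                          int *sack_perm, int *wscale)
{
    *sack_perm = 0;
    *wscale = -1;           /* -1: no window scale option present */

    while (length > 0) {
        int opcode = *ptr++;
        int opsize;

        switch (opcode) {
        case TCPOPT_EOL:
            return;
        case TCPOPT_NOP:
            length--;
            continue;
        default:
            opsize = *ptr++;
            if (opsize < 2)          /* malformed */
                return;
            if (opsize > length)     /* don't parse partial options */
                return;
            if (opcode == TCPOPT_SACK_PERM && opsize == TCPOLEN_SACK_PERM)
                *sack_perm = 1;
            else if (opcode == TCPOPT_WINDOW && opsize == TCPOLEN_WINDOW) {
                *wscale = ptr[0];
                if (*wscale > 14)    /* RFC 1323 caps the shift at 14 */
                    *wscale = 14;
            }
            ptr += opsize - 2;
            length -= opsize;
        }
    }
}

int main(void)
{
    /* NOP, NOP, window scale = 7, SACK permitted */
    const unsigned char opts[] = { 1, 1, 3, 3, 7, 4, 2 };
    int sack_perm, wscale;

    parse_options(opts, sizeof(opts), &sack_perm, &wscale);
    printf("sack_perm=%d wscale=%d\n", sack_perm, wscale);  /* 1, 7 */
    return 0;
}

The same skip-by-opsize loop is reused by tcp_sack() below to find the highest SACK right edge.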
@@ -517,16 +517,16 @@ static void tcp_sack(const struct sk_buff *skb, | |||
517 | /* Fast path for timestamp-only option */ | 517 | /* Fast path for timestamp-only option */ |
518 | if (length == TCPOLEN_TSTAMP_ALIGNED*4 | 518 | if (length == TCPOLEN_TSTAMP_ALIGNED*4 |
519 | && *(__be32 *)ptr == | 519 | && *(__be32 *)ptr == |
520 | __constant_htonl((TCPOPT_NOP << 24) | 520 | __constant_htonl((TCPOPT_NOP << 24) |
521 | | (TCPOPT_NOP << 16) | 521 | | (TCPOPT_NOP << 16) |
522 | | (TCPOPT_TIMESTAMP << 8) | 522 | | (TCPOPT_TIMESTAMP << 8) |
523 | | TCPOLEN_TIMESTAMP)) | 523 | | TCPOLEN_TIMESTAMP)) |
524 | return; | 524 | return; |
525 | 525 | ||
526 | while (length > 0) { | 526 | while (length > 0) { |
527 | int opcode=*ptr++; | 527 | int opcode=*ptr++; |
528 | int opsize, i; | 528 | int opsize, i; |
529 | 529 | ||
530 | switch (opcode) { | 530 | switch (opcode) { |
531 | case TCPOPT_EOL: | 531 | case TCPOPT_EOL: |
532 | return; | 532 | return; |
@@ -540,16 +540,16 @@ static void tcp_sack(const struct sk_buff *skb, | |||
540 | if (opsize > length) | 540 | if (opsize > length) |
541 | break; /* don't parse partial options */ | 541 | break; /* don't parse partial options */ |
542 | 542 | ||
543 | if (opcode == TCPOPT_SACK | 543 | if (opcode == TCPOPT_SACK |
544 | && opsize >= (TCPOLEN_SACK_BASE | 544 | && opsize >= (TCPOLEN_SACK_BASE |
545 | + TCPOLEN_SACK_PERBLOCK) | 545 | + TCPOLEN_SACK_PERBLOCK) |
546 | && !((opsize - TCPOLEN_SACK_BASE) | 546 | && !((opsize - TCPOLEN_SACK_BASE) |
547 | % TCPOLEN_SACK_PERBLOCK)) { | 547 | % TCPOLEN_SACK_PERBLOCK)) { |
548 | for (i = 0; | 548 | for (i = 0; |
549 | i < (opsize - TCPOLEN_SACK_BASE); | 549 | i < (opsize - TCPOLEN_SACK_BASE); |
550 | i += TCPOLEN_SACK_PERBLOCK) { | 550 | i += TCPOLEN_SACK_PERBLOCK) { |
551 | tmp = ntohl(*((__be32 *)(ptr+i)+1)); | 551 | tmp = ntohl(*((__be32 *)(ptr+i)+1)); |
552 | 552 | ||
553 | if (after(tmp, *sack)) | 553 | if (after(tmp, *sack)) |
554 | *sack = tmp; | 554 | *sack = tmp; |
555 | } | 555 | } |
@@ -561,18 +561,18 @@ static void tcp_sack(const struct sk_buff *skb, | |||
561 | } | 561 | } |
562 | } | 562 | } |
563 | 563 | ||
564 | static int tcp_in_window(struct ip_ct_tcp *state, | 564 | static int tcp_in_window(struct ip_ct_tcp *state, |
565 | enum ip_conntrack_dir dir, | 565 | enum ip_conntrack_dir dir, |
566 | unsigned int index, | 566 | unsigned int index, |
567 | const struct sk_buff *skb, | 567 | const struct sk_buff *skb, |
568 | struct iphdr *iph, | 568 | struct iphdr *iph, |
569 | struct tcphdr *tcph) | 569 | struct tcphdr *tcph) |
570 | { | 570 | { |
571 | struct ip_ct_tcp_state *sender = &state->seen[dir]; | 571 | struct ip_ct_tcp_state *sender = &state->seen[dir]; |
572 | struct ip_ct_tcp_state *receiver = &state->seen[!dir]; | 572 | struct ip_ct_tcp_state *receiver = &state->seen[!dir]; |
573 | __u32 seq, ack, sack, end, win, swin; | 573 | __u32 seq, ack, sack, end, win, swin; |
574 | int res; | 574 | int res; |
575 | 575 | ||
576 | /* | 576 | /* |
577 | * Get the required data from the packet. | 577 | * Get the required data from the packet. |
578 | */ | 578 | */ |
@@ -580,23 +580,23 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
580 | ack = sack = ntohl(tcph->ack_seq); | 580 | ack = sack = ntohl(tcph->ack_seq); |
581 | win = ntohs(tcph->window); | 581 | win = ntohs(tcph->window); |
582 | end = segment_seq_plus_len(seq, skb->len, iph, tcph); | 582 | end = segment_seq_plus_len(seq, skb->len, iph, tcph); |
583 | 583 | ||
584 | if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) | 584 | if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM) |
585 | tcp_sack(skb, iph, tcph, &sack); | 585 | tcp_sack(skb, iph, tcph, &sack); |
586 | 586 | ||
587 | DEBUGP("tcp_in_window: START\n"); | 587 | DEBUGP("tcp_in_window: START\n"); |
588 | DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " | 588 | DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " |
589 | "seq=%u ack=%u sack=%u win=%u end=%u\n", | 589 | "seq=%u ack=%u sack=%u win=%u end=%u\n", |
590 | NIPQUAD(iph->saddr), ntohs(tcph->source), | 590 | NIPQUAD(iph->saddr), ntohs(tcph->source), |
591 | NIPQUAD(iph->daddr), ntohs(tcph->dest), | 591 | NIPQUAD(iph->daddr), ntohs(tcph->dest), |
592 | seq, ack, sack, win, end); | 592 | seq, ack, sack, win, end); |
593 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " | 593 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " |
594 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 594 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
595 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 595 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
596 | sender->td_scale, | 596 | sender->td_scale, |
597 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 597 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
598 | receiver->td_scale); | 598 | receiver->td_scale); |
599 | 599 | ||
600 | if (sender->td_end == 0) { | 600 | if (sender->td_end == 0) { |
601 | /* | 601 | /* |
602 | * Initialize sender data. | 602 | * Initialize sender data. |
@@ -605,26 +605,26 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
605 | /* | 605 | /* |
606 | * Outgoing SYN-ACK in reply to a SYN. | 606 | * Outgoing SYN-ACK in reply to a SYN. |
607 | */ | 607 | */ |
608 | sender->td_end = | 608 | sender->td_end = |
609 | sender->td_maxend = end; | 609 | sender->td_maxend = end; |
610 | sender->td_maxwin = (win == 0 ? 1 : win); | 610 | sender->td_maxwin = (win == 0 ? 1 : win); |
611 | 611 | ||
612 | tcp_options(skb, iph, tcph, sender); | 612 | tcp_options(skb, iph, tcph, sender); |
613 | /* | 613 | /* |
614 | * RFC 1323: | 614 | * RFC 1323: |
615 | * Both sides must send the Window Scale option | 615 | * Both sides must send the Window Scale option |
616 | * to enable window scaling in either direction. | 616 | * to enable window scaling in either direction. |
617 | */ | 617 | */ |
618 | if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE | 618 | if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE |
619 | && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) | 619 | && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) |
620 | sender->td_scale = | 620 | sender->td_scale = |
621 | receiver->td_scale = 0; | 621 | receiver->td_scale = 0; |
622 | } else { | 622 | } else { |
623 | /* | 623 | /* |
624 | * We are in the middle of a connection, | 624 | * We are in the middle of a connection, |
625 | * its history is lost for us. | 625 | * its history is lost for us. |
626 | * Let's try to use the data from the packet. | 626 | * Let's try to use the data from the packet. |
627 | */ | 627 | */ |
628 | sender->td_end = end; | 628 | sender->td_end = end; |
629 | sender->td_maxwin = (win == 0 ? 1 : win); | 629 | sender->td_maxwin = (win == 0 ? 1 : win); |
630 | sender->td_maxend = end + sender->td_maxwin; | 630 | sender->td_maxend = end + sender->td_maxwin; |
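The branch above zeroes both td_scale values unless each direction advertised the Window Scale option, since RFC 1323 only enables scaling when both SYNs carry it. A minimal illustration of that rule (the flag bit and function are invented for the example):

#include <stdio.h>

#define FLAG_WINDOW_SCALE 0x1u    /* illustrative flag bit */

/* Effective advertised window: the shift only applies when both ends
 * offered the Window Scale option during the handshake (RFC 1323). */
static unsigned int effective_window(unsigned int win,
                                     unsigned int snd_flags, unsigned int snd_scale,
                                     unsigned int rcv_flags)
{
    if (!((snd_flags & FLAG_WINDOW_SCALE) && (rcv_flags & FLAG_WINDOW_SCALE)))
        snd_scale = 0;            /* scaling not negotiated: ignore the shift */
    return win << snd_scale;
}

int main(void)
{
    printf("%u\n", effective_window(1000, FLAG_WINDOW_SCALE, 7, FLAG_WINDOW_SCALE)); /* 128000 */
    printf("%u\n", effective_window(1000, FLAG_WINDOW_SCALE, 7, 0));                 /* 1000   */
    return 0;
}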
@@ -632,11 +632,11 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
632 | } else if (((state->state == TCP_CONNTRACK_SYN_SENT | 632 | } else if (((state->state == TCP_CONNTRACK_SYN_SENT |
633 | && dir == IP_CT_DIR_ORIGINAL) | 633 | && dir == IP_CT_DIR_ORIGINAL) |
634 | || (state->state == TCP_CONNTRACK_SYN_RECV | 634 | || (state->state == TCP_CONNTRACK_SYN_RECV |
635 | && dir == IP_CT_DIR_REPLY)) | 635 | && dir == IP_CT_DIR_REPLY)) |
636 | && after(end, sender->td_end)) { | 636 | && after(end, sender->td_end)) { |
637 | /* | 637 | /* |
638 | * RFC 793: "if a TCP is reinitialized ... then it need | 638 | * RFC 793: "if a TCP is reinitialized ... then it need |
639 | * not wait at all; it must only be sure to use sequence | 639 | * not wait at all; it must only be sure to use sequence |
640 | * numbers larger than those recently used." | 640 | * numbers larger than those recently used." |
641 | */ | 641 | */ |
642 | sender->td_end = | 642 | sender->td_end = |
@@ -645,14 +645,14 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
645 | 645 | ||
646 | tcp_options(skb, iph, tcph, sender); | 646 | tcp_options(skb, iph, tcph, sender); |
647 | } | 647 | } |
648 | 648 | ||
649 | if (!(tcph->ack)) { | 649 | if (!(tcph->ack)) { |
650 | /* | 650 | /* |
651 | * If there is no ACK, just pretend it was set and OK. | 651 | * If there is no ACK, just pretend it was set and OK. |
652 | */ | 652 | */ |
653 | ack = sack = receiver->td_end; | 653 | ack = sack = receiver->td_end; |
654 | } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == | 654 | } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) == |
655 | (TCP_FLAG_ACK|TCP_FLAG_RST)) | 655 | (TCP_FLAG_ACK|TCP_FLAG_RST)) |
656 | && (ack == 0)) { | 656 | && (ack == 0)) { |
657 | /* | 657 | /* |
658 | * Broken TCP stacks that set ACK in RST packets as well | 658 | * Broken TCP stacks that set ACK in RST packets as well |
@@ -662,8 +662,8 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
662 | } | 662 | } |
663 | 663 | ||
664 | if (seq == end | 664 | if (seq == end |
665 | && (!tcph->rst | 665 | && (!tcph->rst |
666 | || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) | 666 | || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) |
667 | /* | 667 | /* |
668 | * Packet contains no data: we assume it is valid | 668 | * Packet contains no data: we assume it is valid |
669 | * and check the ack value only. | 669 | * and check the ack value only. |
@@ -672,7 +672,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
672 | * SYN. | 672 | * SYN. |
673 | */ | 673 | */ |
674 | seq = end = sender->td_end; | 674 | seq = end = sender->td_end; |
675 | 675 | ||
676 | DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " | 676 | DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " |
677 | "seq=%u ack=%u sack =%u win=%u end=%u\n", | 677 | "seq=%u ack=%u sack =%u win=%u end=%u\n", |
678 | NIPQUAD(iph->saddr), ntohs(tcph->source), | 678 | NIPQUAD(iph->saddr), ntohs(tcph->source), |
@@ -681,26 +681,26 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
681 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " | 681 | DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i " |
682 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 682 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
683 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 683 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
684 | sender->td_scale, | 684 | sender->td_scale, |
685 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 685 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
686 | receiver->td_scale); | 686 | receiver->td_scale); |
687 | 687 | ||
688 | DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", | 688 | DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n", |
689 | before(seq, sender->td_maxend + 1), | 689 | before(seq, sender->td_maxend + 1), |
690 | after(end, sender->td_end - receiver->td_maxwin - 1), | 690 | after(end, sender->td_end - receiver->td_maxwin - 1), |
691 | before(sack, receiver->td_end + 1), | 691 | before(sack, receiver->td_end + 1), |
692 | after(ack, receiver->td_end - MAXACKWINDOW(sender))); | 692 | after(ack, receiver->td_end - MAXACKWINDOW(sender))); |
693 | 693 | ||
694 | if (before(seq, sender->td_maxend + 1) && | 694 | if (before(seq, sender->td_maxend + 1) && |
695 | after(end, sender->td_end - receiver->td_maxwin - 1) && | 695 | after(end, sender->td_end - receiver->td_maxwin - 1) && |
696 | before(sack, receiver->td_end + 1) && | 696 | before(sack, receiver->td_end + 1) && |
697 | after(ack, receiver->td_end - MAXACKWINDOW(sender))) { | 697 | after(ack, receiver->td_end - MAXACKWINDOW(sender))) { |
698 | /* | 698 | /* |
699 | * Take into account window scaling (RFC 1323). | 699 | * Take into account window scaling (RFC 1323). |
700 | */ | 700 | */ |
701 | if (!tcph->syn) | 701 | if (!tcph->syn) |
702 | win <<= sender->td_scale; | 702 | win <<= sender->td_scale; |
703 | 703 | ||
704 | /* | 704 | /* |
705 | * Update sender data. | 705 | * Update sender data. |
706 | */ | 706 | */ |
@@ -720,7 +720,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
720 | receiver->td_maxend++; | 720 | receiver->td_maxend++; |
721 | } | 721 | } |
722 | 722 | ||
723 | /* | 723 | /* |
724 | * Check retransmissions. | 724 | * Check retransmissions. |
725 | */ | 725 | */ |
726 | if (index == TCP_ACK_SET) { | 726 | if (index == TCP_ACK_SET) { |
@@ -756,11 +756,11 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
756 | : "ACK is over the upper bound (ACKed data not seen yet)" | 756 | : "ACK is over the upper bound (ACKed data not seen yet)" |
757 | : "SEQ is under the lower bound (already ACKed data retransmitted)" | 757 | : "SEQ is under the lower bound (already ACKed data retransmitted)" |
758 | : "SEQ is over the upper bound (over the window of the receiver)"); | 758 | : "SEQ is over the upper bound (over the window of the receiver)"); |
759 | } | 759 | } |
760 | 760 | ||
761 | DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " | 761 | DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u " |
762 | "receiver end=%u maxend=%u maxwin=%u\n", | 762 | "receiver end=%u maxend=%u maxwin=%u\n", |
763 | res, sender->td_end, sender->td_maxend, sender->td_maxwin, | 763 | res, sender->td_end, sender->td_maxend, sender->td_maxwin, |
764 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin); | 764 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin); |
765 | 765 | ||
766 | return res; | 766 | return res; |
@@ -769,7 +769,7 @@ static int tcp_in_window(struct ip_ct_tcp *state, | |||
769 | #ifdef CONFIG_IP_NF_NAT_NEEDED | 769 | #ifdef CONFIG_IP_NF_NAT_NEEDED |
770 | /* Update sender->td_end after NAT successfully mangled the packet */ | 770 | /* Update sender->td_end after NAT successfully mangled the packet */ |
771 | void ip_conntrack_tcp_update(struct sk_buff *skb, | 771 | void ip_conntrack_tcp_update(struct sk_buff *skb, |
772 | struct ip_conntrack *conntrack, | 772 | struct ip_conntrack *conntrack, |
773 | enum ip_conntrack_dir dir) | 773 | enum ip_conntrack_dir dir) |
774 | { | 774 | { |
775 | struct iphdr *iph = skb->nh.iph; | 775 | struct iphdr *iph = skb->nh.iph; |
@@ -781,7 +781,7 @@ void ip_conntrack_tcp_update(struct sk_buff *skb, | |||
781 | #endif | 781 | #endif |
782 | 782 | ||
783 | end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph); | 783 | end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, iph, tcph); |
784 | 784 | ||
785 | write_lock_bh(&tcp_lock); | 785 | write_lock_bh(&tcp_lock); |
786 | /* | 786 | /* |
787 | * We have to worry for the ack in the reply packet only... | 787 | * We have to worry for the ack in the reply packet only... |
@@ -793,11 +793,11 @@ void ip_conntrack_tcp_update(struct sk_buff *skb, | |||
793 | DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " | 793 | DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i " |
794 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 794 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
795 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 795 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
796 | sender->td_scale, | 796 | sender->td_scale, |
797 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 797 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
798 | receiver->td_scale); | 798 | receiver->td_scale); |
799 | } | 799 | } |
800 | 800 | ||
801 | #endif | 801 | #endif |
802 | 802 | ||
803 | #define TH_FIN 0x01 | 803 | #define TH_FIN 0x01 |
@@ -847,8 +847,8 @@ static int tcp_error(struct sk_buff *skb, | |||
847 | nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, | 847 | nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, |
848 | "ip_ct_tcp: short packet "); | 848 | "ip_ct_tcp: short packet "); |
849 | return -NF_ACCEPT; | 849 | return -NF_ACCEPT; |
850 | } | 850 | } |
851 | 851 | ||
852 | /* Not whole TCP header or malformed packet */ | 852 | /* Not whole TCP header or malformed packet */ |
853 | if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { | 853 | if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { |
854 | if (LOG_INVALID(IPPROTO_TCP)) | 854 | if (LOG_INVALID(IPPROTO_TCP)) |
@@ -856,7 +856,7 @@ static int tcp_error(struct sk_buff *skb, | |||
856 | "ip_ct_tcp: truncated/malformed packet "); | 856 | "ip_ct_tcp: truncated/malformed packet "); |
857 | return -NF_ACCEPT; | 857 | return -NF_ACCEPT; |
858 | } | 858 | } |
859 | 859 | ||
860 | /* Checksum invalid? Ignore. | 860 | /* Checksum invalid? Ignore. |
861 | * We skip checking packets on the outgoing path | 861 | * We skip checking packets on the outgoing path |
862 | * because it is assumed to be correct. | 862 | * because it is assumed to be correct. |
@@ -893,11 +893,11 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
893 | struct tcphdr *th, _tcph; | 893 | struct tcphdr *th, _tcph; |
894 | unsigned long timeout; | 894 | unsigned long timeout; |
895 | unsigned int index; | 895 | unsigned int index; |
896 | 896 | ||
897 | th = skb_header_pointer(skb, iph->ihl * 4, | 897 | th = skb_header_pointer(skb, iph->ihl * 4, |
898 | sizeof(_tcph), &_tcph); | 898 | sizeof(_tcph), &_tcph); |
899 | BUG_ON(th == NULL); | 899 | BUG_ON(th == NULL); |
900 | 900 | ||
901 | write_lock_bh(&tcp_lock); | 901 | write_lock_bh(&tcp_lock); |
902 | old_state = conntrack->proto.tcp.state; | 902 | old_state = conntrack->proto.tcp.state; |
903 | dir = CTINFO2DIR(ctinfo); | 903 | dir = CTINFO2DIR(ctinfo); |
@@ -907,7 +907,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
907 | switch (new_state) { | 907 | switch (new_state) { |
908 | case TCP_CONNTRACK_IGNORE: | 908 | case TCP_CONNTRACK_IGNORE: |
909 | /* Ignored packets: | 909 | /* Ignored packets: |
910 | * | 910 | * |
911 | * a) SYN in ORIGINAL | 911 | * a) SYN in ORIGINAL |
912 | * b) SYN/ACK in REPLY | 912 | * b) SYN/ACK in REPLY |
913 | * c) ACK in reply direction after initial SYN in original. | 913 | * c) ACK in reply direction after initial SYN in original. |
@@ -916,30 +916,30 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
916 | && conntrack->proto.tcp.last_index == TCP_SYN_SET | 916 | && conntrack->proto.tcp.last_index == TCP_SYN_SET |
917 | && conntrack->proto.tcp.last_dir != dir | 917 | && conntrack->proto.tcp.last_dir != dir |
918 | && ntohl(th->ack_seq) == | 918 | && ntohl(th->ack_seq) == |
919 | conntrack->proto.tcp.last_end) { | 919 | conntrack->proto.tcp.last_end) { |
920 | /* This SYN/ACK acknowledges a SYN that we earlier | 920 | /* This SYN/ACK acknowledges a SYN that we earlier |
921 | * ignored as invalid. This means that the client and | 921 | * ignored as invalid. This means that the client and |
922 | * the server are both in sync, while the firewall is | 922 | * the server are both in sync, while the firewall is |
923 | * not. We kill this session and block the SYN/ACK so | 923 | * not. We kill this session and block the SYN/ACK so |
924 | * that the client cannot but retransmit its SYN and | 924 | * that the client cannot but retransmit its SYN and |
925 | * thus initiate a clean new session. | 925 | * thus initiate a clean new session. |
926 | */ | 926 | */ |
927 | write_unlock_bh(&tcp_lock); | 927 | write_unlock_bh(&tcp_lock); |
928 | if (LOG_INVALID(IPPROTO_TCP)) | 928 | if (LOG_INVALID(IPPROTO_TCP)) |
929 | nf_log_packet(PF_INET, 0, skb, NULL, NULL, | 929 | nf_log_packet(PF_INET, 0, skb, NULL, NULL, |
930 | NULL, "ip_ct_tcp: " | 930 | NULL, "ip_ct_tcp: " |
931 | "killing out of sync session "); | 931 | "killing out of sync session "); |
932 | if (del_timer(&conntrack->timeout)) | 932 | if (del_timer(&conntrack->timeout)) |
933 | conntrack->timeout.function((unsigned long) | 933 | conntrack->timeout.function((unsigned long) |
934 | conntrack); | 934 | conntrack); |
935 | return -NF_DROP; | 935 | return -NF_DROP; |
936 | } | 936 | } |
937 | conntrack->proto.tcp.last_index = index; | 937 | conntrack->proto.tcp.last_index = index; |
938 | conntrack->proto.tcp.last_dir = dir; | 938 | conntrack->proto.tcp.last_dir = dir; |
939 | conntrack->proto.tcp.last_seq = ntohl(th->seq); | 939 | conntrack->proto.tcp.last_seq = ntohl(th->seq); |
940 | conntrack->proto.tcp.last_end = | 940 | conntrack->proto.tcp.last_end = |
941 | segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th); | 941 | segment_seq_plus_len(ntohl(th->seq), skb->len, iph, th); |
942 | 942 | ||
943 | write_unlock_bh(&tcp_lock); | 943 | write_unlock_bh(&tcp_lock); |
944 | if (LOG_INVALID(IPPROTO_TCP)) | 944 | if (LOG_INVALID(IPPROTO_TCP)) |
945 | nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, | 945 | nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, |
@@ -959,16 +959,16 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
959 | if (old_state < TCP_CONNTRACK_TIME_WAIT) | 959 | if (old_state < TCP_CONNTRACK_TIME_WAIT) |
960 | break; | 960 | break; |
961 | if ((conntrack->proto.tcp.seen[dir].flags & | 961 | if ((conntrack->proto.tcp.seen[dir].flags & |
962 | IP_CT_TCP_FLAG_CLOSE_INIT) | 962 | IP_CT_TCP_FLAG_CLOSE_INIT) |
963 | || after(ntohl(th->seq), | 963 | || after(ntohl(th->seq), |
964 | conntrack->proto.tcp.seen[dir].td_end)) { | 964 | conntrack->proto.tcp.seen[dir].td_end)) { |
965 | /* Attempt to reopen a closed connection. | 965 | /* Attempt to reopen a closed connection. |
966 | * Delete this connection and look up again. */ | 966 | * Delete this connection and look up again. */ |
967 | write_unlock_bh(&tcp_lock); | 967 | write_unlock_bh(&tcp_lock); |
968 | if (del_timer(&conntrack->timeout)) | 968 | if (del_timer(&conntrack->timeout)) |
969 | conntrack->timeout.function((unsigned long) | 969 | conntrack->timeout.function((unsigned long) |
970 | conntrack); | 970 | conntrack); |
971 | return -NF_REPEAT; | 971 | return -NF_REPEAT; |
972 | } else { | 972 | } else { |
973 | write_unlock_bh(&tcp_lock); | 973 | write_unlock_bh(&tcp_lock); |
974 | if (LOG_INVALID(IPPROTO_TCP)) | 974 | if (LOG_INVALID(IPPROTO_TCP)) |
@@ -979,9 +979,9 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
979 | case TCP_CONNTRACK_CLOSE: | 979 | case TCP_CONNTRACK_CLOSE: |
980 | if (index == TCP_RST_SET | 980 | if (index == TCP_RST_SET |
981 | && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) | 981 | && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) |
982 | && conntrack->proto.tcp.last_index == TCP_SYN_SET) | 982 | && conntrack->proto.tcp.last_index == TCP_SYN_SET) |
983 | || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) | 983 | || (!test_bit(IPS_ASSURED_BIT, &conntrack->status) |
984 | && conntrack->proto.tcp.last_index == TCP_ACK_SET)) | 984 | && conntrack->proto.tcp.last_index == TCP_ACK_SET)) |
985 | && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { | 985 | && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) { |
986 | /* RST sent to invalid SYN or ACK we had let through | 986 | /* RST sent to invalid SYN or ACK we had let through |
987 | * at a) and c) above: | 987 | * at a) and c) above: |
@@ -1000,13 +1000,13 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
1000 | break; | 1000 | break; |
1001 | } | 1001 | } |
1002 | 1002 | ||
1003 | if (!tcp_in_window(&conntrack->proto.tcp, dir, index, | 1003 | if (!tcp_in_window(&conntrack->proto.tcp, dir, index, |
1004 | skb, iph, th)) { | 1004 | skb, iph, th)) { |
1005 | write_unlock_bh(&tcp_lock); | 1005 | write_unlock_bh(&tcp_lock); |
1006 | return -NF_ACCEPT; | 1006 | return -NF_ACCEPT; |
1007 | } | 1007 | } |
1008 | in_window: | 1008 | in_window: |
1009 | /* From now on we have got in-window packets */ | 1009 | /* From now on we have got in-window packets */ |
1010 | conntrack->proto.tcp.last_index = index; | 1010 | conntrack->proto.tcp.last_index = index; |
1011 | 1011 | ||
1012 | DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " | 1012 | DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " |
@@ -1018,9 +1018,9 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
1018 | old_state, new_state); | 1018 | old_state, new_state); |
1019 | 1019 | ||
1020 | conntrack->proto.tcp.state = new_state; | 1020 | conntrack->proto.tcp.state = new_state; |
1021 | if (old_state != new_state | 1021 | if (old_state != new_state |
1022 | && (new_state == TCP_CONNTRACK_FIN_WAIT | 1022 | && (new_state == TCP_CONNTRACK_FIN_WAIT |
1023 | || new_state == TCP_CONNTRACK_CLOSE)) | 1023 | || new_state == TCP_CONNTRACK_CLOSE)) |
1024 | conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; | 1024 | conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; |
1025 | timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans | 1025 | timeout = conntrack->proto.tcp.retrans >= ip_ct_tcp_max_retrans |
1026 | && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans | 1026 | && *tcp_timeouts[new_state] > ip_ct_tcp_timeout_max_retrans |
@@ -1046,8 +1046,8 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
1046 | && (old_state == TCP_CONNTRACK_SYN_RECV | 1046 | && (old_state == TCP_CONNTRACK_SYN_RECV |
1047 | || old_state == TCP_CONNTRACK_ESTABLISHED) | 1047 | || old_state == TCP_CONNTRACK_ESTABLISHED) |
1048 | && new_state == TCP_CONNTRACK_ESTABLISHED) { | 1048 | && new_state == TCP_CONNTRACK_ESTABLISHED) { |
1049 | /* Set ASSURED if we see a valid ack in ESTABLISHED | 1049 | /* Set ASSURED if we see a valid ack in ESTABLISHED |
1050 | after SYN_RECV or a valid answer for a picked up | 1050 | after SYN_RECV or a valid answer for a picked up |
1051 | connection. */ | 1051 | connection. */ |
1052 | set_bit(IPS_ASSURED_BIT, &conntrack->status); | 1052 | set_bit(IPS_ASSURED_BIT, &conntrack->status); |
1053 | ip_conntrack_event_cache(IPCT_STATUS, skb); | 1053 | ip_conntrack_event_cache(IPCT_STATUS, skb); |
@@ -1056,7 +1056,7 @@ static int tcp_packet(struct ip_conntrack *conntrack, | |||
1056 | 1056 | ||
1057 | return NF_ACCEPT; | 1057 | return NF_ACCEPT; |
1058 | } | 1058 | } |
1059 | 1059 | ||
1060 | /* Called when a new connection for this protocol is found. */ | 1060 | /* Called when a new connection for this protocol is found. */ |
1061 | static int tcp_new(struct ip_conntrack *conntrack, | 1061 | static int tcp_new(struct ip_conntrack *conntrack, |
1062 | const struct sk_buff *skb) | 1062 | const struct sk_buff *skb) |
@@ -1072,7 +1072,7 @@ static int tcp_new(struct ip_conntrack *conntrack, | |||
1072 | th = skb_header_pointer(skb, iph->ihl * 4, | 1072 | th = skb_header_pointer(skb, iph->ihl * 4, |
1073 | sizeof(_tcph), &_tcph); | 1073 | sizeof(_tcph), &_tcph); |
1074 | BUG_ON(th == NULL); | 1074 | BUG_ON(th == NULL); |
1075 | 1075 | ||
1076 | /* Don't need lock here: this conntrack not in circulation yet */ | 1076 | /* Don't need lock here: this conntrack not in circulation yet */ |
1077 | new_state | 1077 | new_state |
1078 | = tcp_conntracks[0][get_conntrack_index(th)] | 1078 | = tcp_conntracks[0][get_conntrack_index(th)] |
@@ -1113,7 +1113,7 @@ static int tcp_new(struct ip_conntrack *conntrack, | |||
1113 | if (conntrack->proto.tcp.seen[0].td_maxwin == 0) | 1113 | if (conntrack->proto.tcp.seen[0].td_maxwin == 0) |
1114 | conntrack->proto.tcp.seen[0].td_maxwin = 1; | 1114 | conntrack->proto.tcp.seen[0].td_maxwin = 1; |
1115 | conntrack->proto.tcp.seen[0].td_maxend = | 1115 | conntrack->proto.tcp.seen[0].td_maxend = |
1116 | conntrack->proto.tcp.seen[0].td_end + | 1116 | conntrack->proto.tcp.seen[0].td_end + |
1117 | conntrack->proto.tcp.seen[0].td_maxwin; | 1117 | conntrack->proto.tcp.seen[0].td_maxwin; |
1118 | conntrack->proto.tcp.seen[0].td_scale = 0; | 1118 | conntrack->proto.tcp.seen[0].td_scale = 0; |
1119 | 1119 | ||
@@ -1123,25 +1123,25 @@ static int tcp_new(struct ip_conntrack *conntrack, | |||
1123 | conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | | 1123 | conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM | |
1124 | IP_CT_TCP_FLAG_BE_LIBERAL; | 1124 | IP_CT_TCP_FLAG_BE_LIBERAL; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | conntrack->proto.tcp.seen[1].td_end = 0; | 1127 | conntrack->proto.tcp.seen[1].td_end = 0; |
1128 | conntrack->proto.tcp.seen[1].td_maxend = 0; | 1128 | conntrack->proto.tcp.seen[1].td_maxend = 0; |
1129 | conntrack->proto.tcp.seen[1].td_maxwin = 1; | 1129 | conntrack->proto.tcp.seen[1].td_maxwin = 1; |
1130 | conntrack->proto.tcp.seen[1].td_scale = 0; | 1130 | conntrack->proto.tcp.seen[1].td_scale = 0; |
1131 | 1131 | ||
1132 | /* tcp_packet will set them */ | 1132 | /* tcp_packet will set them */ |
1133 | conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; | 1133 | conntrack->proto.tcp.state = TCP_CONNTRACK_NONE; |
1134 | conntrack->proto.tcp.last_index = TCP_NONE_SET; | 1134 | conntrack->proto.tcp.last_index = TCP_NONE_SET; |
1135 | 1135 | ||
1136 | DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " | 1136 | DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i " |
1137 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", | 1137 | "receiver end=%u maxend=%u maxwin=%u scale=%i\n", |
1138 | sender->td_end, sender->td_maxend, sender->td_maxwin, | 1138 | sender->td_end, sender->td_maxend, sender->td_maxwin, |
1139 | sender->td_scale, | 1139 | sender->td_scale, |
1140 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 1140 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
1141 | receiver->td_scale); | 1141 | receiver->td_scale); |
1142 | return 1; | 1142 | return 1; |
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | struct ip_conntrack_protocol ip_conntrack_protocol_tcp = | 1145 | struct ip_conntrack_protocol ip_conntrack_protocol_tcp = |
1146 | { | 1146 | { |
1147 | .proto = IPPROTO_TCP, | 1147 | .proto = IPPROTO_TCP, |
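
Several hunks above record last_seq/last_end via segment_seq_plus_len() so that a later SYN/ACK or RST can be matched against the exact end of a segment that was let through. As a rough model (not the kernel source), the end of a segment in sequence space is its sequence number plus the payload length, with SYN and FIN each counting as one octet; the sketch below shows only that arithmetic and takes the payload length directly instead of deriving it from skb and header lengths.

#include <stdint.h>
#include <stdio.h>

/* End of a TCP segment in sequence space: seq plus the payload length,
 * where SYN and FIN each occupy one sequence number. */
static uint32_t seg_end(uint32_t seq, uint32_t payload_len, int syn, int fin)
{
    return seq + payload_len + (syn ? 1 : 0) + (fin ? 1 : 0);
}

int main(void)
{
    /* A bare SYN consumes exactly one sequence number... */
    printf("%u\n", (unsigned)seg_end(1000, 0, 1, 0));   /* 1001 */
    /* ...and a 100-byte data segment ends 100 later. */
    printf("%u\n", (unsigned)seg_end(1001, 100, 0, 0)); /* 1101 */
    return 0;
}
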
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c index d0e8a16970ec..a99a7c75e5b5 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c | |||
@@ -70,7 +70,7 @@ static int udp_packet(struct ip_conntrack *conntrack, | |||
70 | /* If we've seen traffic both ways, this is some kind of UDP | 70 | /* If we've seen traffic both ways, this is some kind of UDP |
71 | stream. Extend timeout. */ | 71 | stream. Extend timeout. */ |
72 | if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { | 72 | if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) { |
73 | ip_ct_refresh_acct(conntrack, ctinfo, skb, | 73 | ip_ct_refresh_acct(conntrack, ctinfo, skb, |
74 | ip_ct_udp_timeout_stream); | 74 | ip_ct_udp_timeout_stream); |
75 | /* Also, more likely to be important, and not a probe */ | 75 | /* Also, more likely to be important, and not a probe */ |
76 | if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status)) | 76 | if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status)) |
@@ -102,7 +102,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, | |||
102 | "ip_ct_udp: short packet "); | 102 | "ip_ct_udp: short packet "); |
103 | return -NF_ACCEPT; | 103 | return -NF_ACCEPT; |
104 | } | 104 | } |
105 | 105 | ||
106 | /* Truncated/malformed packets */ | 106 | /* Truncated/malformed packets */ |
107 | if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { | 107 | if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { |
108 | if (LOG_INVALID(IPPROTO_UDP)) | 108 | if (LOG_INVALID(IPPROTO_UDP)) |
@@ -110,7 +110,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, | |||
110 | "ip_ct_udp: truncated/malformed packet "); | 110 | "ip_ct_udp: truncated/malformed packet "); |
111 | return -NF_ACCEPT; | 111 | return -NF_ACCEPT; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* Packet with no checksum */ | 114 | /* Packet with no checksum */ |
115 | if (!hdr->check) | 115 | if (!hdr->check) |
116 | return NF_ACCEPT; | 116 | return NF_ACCEPT; |
@@ -126,7 +126,7 @@ static int udp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo, | |||
126 | "ip_ct_udp: bad UDP checksum "); | 126 | "ip_ct_udp: bad UDP checksum "); |
127 | return -NF_ACCEPT; | 127 | return -NF_ACCEPT; |
128 | } | 128 | } |
129 | 129 | ||
130 | return NF_ACCEPT; | 130 | return NF_ACCEPT; |
131 | } | 131 | } |
132 | 132 | ||
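
The udp_packet() hunk above shows the two-tier UDP timeout policy: an unreplied entry keeps a short timeout, while an entry that has seen traffic in both directions is refreshed with the longer stream timeout and marked ASSURED. The sketch below models that policy in plain C; the 30 s / 180 s values are the usual defaults and are stated here as assumptions, since the real values are tunables.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative defaults; the real values are sysctls/module parameters. */
#define UDP_TIMEOUT          30   /* seconds, unreplied                 */
#define UDP_TIMEOUT_STREAM  180   /* seconds, seen in both directions   */

struct udp_entry {
    bool seen_reply;   /* IPS_SEEN_REPLY_BIT */
    bool assured;      /* IPS_ASSURED_BIT    */
    int  timeout;      /* seconds until the entry expires */
};

/* Refresh an entry when a packet for it arrives. */
static void udp_refresh(struct udp_entry *e)
{
    if (e->seen_reply) {
        e->timeout = UDP_TIMEOUT_STREAM;
        e->assured = true;   /* more likely to be important, not a probe */
    } else {
        e->timeout = UDP_TIMEOUT;
    }
}

int main(void)
{
    struct udp_entry e = { .seen_reply = false };
    udp_refresh(&e);
    printf("timeout=%d assured=%d\n", e.timeout, e.assured);  /* 30 0 */
    e.seen_reply = true;
    udp_refresh(&e);
    printf("timeout=%d assured=%d\n", e.timeout, e.assured);  /* 180 1 */
    return 0;
}
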
diff --git a/net/ipv4/netfilter/ip_conntrack_sip.c b/net/ipv4/netfilter/ip_conntrack_sip.c index 11c588a10e6b..c59a962c1f61 100644 --- a/net/ipv4/netfilter/ip_conntrack_sip.c +++ b/net/ipv4/netfilter/ip_conntrack_sip.c | |||
@@ -321,7 +321,7 @@ int ct_sip_get_info(const char *dptr, size_t dlen, | |||
321 | continue; | 321 | continue; |
322 | } | 322 | } |
323 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, | 323 | aux = ct_sip_search(hnfo->ln_str, dptr, hnfo->ln_strlen, |
324 | ct_sip_lnlen(dptr, limit), | 324 | ct_sip_lnlen(dptr, limit), |
325 | hnfo->case_sensitive); | 325 | hnfo->case_sensitive); |
326 | if (!aux) { | 326 | if (!aux) { |
327 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, | 327 | DEBUGP("'%s' not found in '%s'.\n", hnfo->ln_str, |
@@ -406,7 +406,7 @@ static int sip_help(struct sk_buff **pskb, | |||
406 | if (dataoff >= (*pskb)->len) { | 406 | if (dataoff >= (*pskb)->len) { |
407 | DEBUGP("skb->len = %u\n", (*pskb)->len); | 407 | DEBUGP("skb->len = %u\n", (*pskb)->len); |
408 | return NF_ACCEPT; | 408 | return NF_ACCEPT; |
409 | } | 409 | } |
410 | 410 | ||
411 | ip_ct_refresh(ct, *pskb, sip_timeout * HZ); | 411 | ip_ct_refresh(ct, *pskb, sip_timeout * HZ); |
412 | 412 | ||
@@ -439,16 +439,16 @@ static int sip_help(struct sk_buff **pskb, | |||
439 | } | 439 | } |
440 | /* Get ip and port address from SDP packet. */ | 440 | /* Get ip and port address from SDP packet. */ |
441 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, | 441 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, |
442 | POS_CONNECTION) > 0) { | 442 | POS_CONNECTION) > 0) { |
443 | 443 | ||
444 | /* We'll drop only if there are parse problems. */ | 444 | /* We'll drop only if there are parse problems. */ |
445 | if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr, | 445 | if (parse_ipaddr(dptr + matchoff, NULL, &ipaddr, |
446 | dptr + datalen) < 0) { | 446 | dptr + datalen) < 0) { |
447 | ret = NF_DROP; | 447 | ret = NF_DROP; |
448 | goto out; | 448 | goto out; |
449 | } | 449 | } |
450 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, | 450 | if (ct_sip_get_info(dptr, datalen, &matchoff, &matchlen, |
451 | POS_MEDIA) > 0) { | 451 | POS_MEDIA) > 0) { |
452 | 452 | ||
453 | port = simple_strtoul(dptr + matchoff, NULL, 10); | 453 | port = simple_strtoul(dptr + matchoff, NULL, 10); |
454 | if (port < 1024) { | 454 | if (port < 1024) { |
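
The sip_help() hunk pulls two values out of the SDP body: the connection address (POS_CONNECTION, the "c=" line) and the media port (POS_MEDIA, the "m=" line), which together tell the helper where to expect the RTP stream. A toy extraction over a literal SDP fragment makes the two fields concrete; the parsing below is deliberately simplistic and is not the conntrack API.

#include <stdio.h>

int main(void)
{
    /* Two lines of a typical SDP body carried in a SIP INVITE. */
    const char *sdp =
        "c=IN IP4 192.0.2.10\r\n"
        "m=audio 49170 RTP/AVP 0\r\n";

    char addr[16] = "";
    unsigned int port = 0;

    /* "c=" carries the media address, "m=" the media port; these are the
     * two values the helper expects the RTP stream on. */
    if (sscanf(sdp, "c=IN IP4 %15s\r\nm=audio %u", addr, &port) == 2)
        printf("expect RTP to %s:%u\n", addr, port); /* 192.0.2.10:49170 */
    return 0;
}
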
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index 86efb5449676..5903588fddce 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c | |||
@@ -46,7 +46,7 @@ DECLARE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); | |||
46 | 46 | ||
47 | static int kill_proto(struct ip_conntrack *i, void *data) | 47 | static int kill_proto(struct ip_conntrack *i, void *data) |
48 | { | 48 | { |
49 | return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == | 49 | return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == |
50 | *((u_int8_t *) data)); | 50 | *((u_int8_t *) data)); |
51 | } | 51 | } |
52 | 52 | ||
@@ -124,12 +124,12 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) | |||
124 | (*pos)++; | 124 | (*pos)++; |
125 | return ct_get_next(s, v); | 125 | return ct_get_next(s, v); |
126 | } | 126 | } |
127 | 127 | ||
128 | static void ct_seq_stop(struct seq_file *s, void *v) | 128 | static void ct_seq_stop(struct seq_file *s, void *v) |
129 | { | 129 | { |
130 | read_unlock_bh(&ip_conntrack_lock); | 130 | read_unlock_bh(&ip_conntrack_lock); |
131 | } | 131 | } |
132 | 132 | ||
133 | static int ct_seq_show(struct seq_file *s, void *v) | 133 | static int ct_seq_show(struct seq_file *s, void *v) |
134 | { | 134 | { |
135 | const struct ip_conntrack_tuple_hash *hash = v; | 135 | const struct ip_conntrack_tuple_hash *hash = v; |
@@ -155,12 +155,12 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
155 | 155 | ||
156 | if (proto->print_conntrack(s, conntrack)) | 156 | if (proto->print_conntrack(s, conntrack)) |
157 | return -ENOSPC; | 157 | return -ENOSPC; |
158 | 158 | ||
159 | if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | 159 | if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
160 | proto)) | 160 | proto)) |
161 | return -ENOSPC; | 161 | return -ENOSPC; |
162 | 162 | ||
163 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) | 163 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL])) |
164 | return -ENOSPC; | 164 | return -ENOSPC; |
165 | 165 | ||
166 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status))) | 166 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status))) |
@@ -171,7 +171,7 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
171 | proto)) | 171 | proto)) |
172 | return -ENOSPC; | 172 | return -ENOSPC; |
173 | 173 | ||
174 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) | 174 | if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY])) |
175 | return -ENOSPC; | 175 | return -ENOSPC; |
176 | 176 | ||
177 | if (test_bit(IPS_ASSURED_BIT, &conntrack->status)) | 177 | if (test_bit(IPS_ASSURED_BIT, &conntrack->status)) |
@@ -200,7 +200,7 @@ static struct seq_operations ct_seq_ops = { | |||
200 | .stop = ct_seq_stop, | 200 | .stop = ct_seq_stop, |
201 | .show = ct_seq_show | 201 | .show = ct_seq_show |
202 | }; | 202 | }; |
203 | 203 | ||
204 | static int ct_open(struct inode *inode, struct file *file) | 204 | static int ct_open(struct inode *inode, struct file *file) |
205 | { | 205 | { |
206 | struct seq_file *seq; | 206 | struct seq_file *seq; |
@@ -229,7 +229,7 @@ static struct file_operations ct_file_ops = { | |||
229 | .llseek = seq_lseek, | 229 | .llseek = seq_lseek, |
230 | .release = seq_release_private, | 230 | .release = seq_release_private, |
231 | }; | 231 | }; |
232 | 232 | ||
233 | /* expects */ | 233 | /* expects */ |
234 | static void *exp_seq_start(struct seq_file *s, loff_t *pos) | 234 | static void *exp_seq_start(struct seq_file *s, loff_t *pos) |
235 | { | 235 | { |
@@ -253,7 +253,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos) | |||
253 | 253 | ||
254 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) | 254 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) |
255 | { | 255 | { |
256 | struct list_head *e = v; | 256 | struct list_head *e = v; |
257 | 257 | ||
258 | ++*pos; | 258 | ++*pos; |
259 | e = e->next; | 259 | e = e->next; |
@@ -297,7 +297,7 @@ static int exp_open(struct inode *inode, struct file *file) | |||
297 | { | 297 | { |
298 | return seq_open(file, &exp_seq_ops); | 298 | return seq_open(file, &exp_seq_ops); |
299 | } | 299 | } |
300 | 300 | ||
301 | static struct file_operations exp_file_ops = { | 301 | static struct file_operations exp_file_ops = { |
302 | .owner = THIS_MODULE, | 302 | .owner = THIS_MODULE, |
303 | .open = exp_open, | 303 | .open = exp_open, |
@@ -426,14 +426,14 @@ static unsigned int ip_conntrack_help(unsigned int hooknum, | |||
426 | } | 426 | } |
427 | 427 | ||
428 | static unsigned int ip_conntrack_defrag(unsigned int hooknum, | 428 | static unsigned int ip_conntrack_defrag(unsigned int hooknum, |
429 | struct sk_buff **pskb, | 429 | struct sk_buff **pskb, |
430 | const struct net_device *in, | 430 | const struct net_device *in, |
431 | const struct net_device *out, | 431 | const struct net_device *out, |
432 | int (*okfn)(struct sk_buff *)) | 432 | int (*okfn)(struct sk_buff *)) |
433 | { | 433 | { |
434 | #if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE) | 434 | #if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE) |
435 | /* Previously seen (loopback)? Ignore. Do this before | 435 | /* Previously seen (loopback)? Ignore. Do this before |
436 | fragment check. */ | 436 | fragment check. */ |
437 | if ((*pskb)->nfct) | 437 | if ((*pskb)->nfct) |
438 | return NF_ACCEPT; | 438 | return NF_ACCEPT; |
439 | #endif | 439 | #endif |
@@ -441,7 +441,7 @@ static unsigned int ip_conntrack_defrag(unsigned int hooknum, | |||
441 | /* Gather fragments. */ | 441 | /* Gather fragments. */ |
442 | if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { | 442 | if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) { |
443 | *pskb = ip_ct_gather_frags(*pskb, | 443 | *pskb = ip_ct_gather_frags(*pskb, |
444 | hooknum == NF_IP_PRE_ROUTING ? | 444 | hooknum == NF_IP_PRE_ROUTING ? |
445 | IP_DEFRAG_CONNTRACK_IN : | 445 | IP_DEFRAG_CONNTRACK_IN : |
446 | IP_DEFRAG_CONNTRACK_OUT); | 446 | IP_DEFRAG_CONNTRACK_OUT); |
447 | if (!*pskb) | 447 | if (!*pskb) |
@@ -776,7 +776,7 @@ static ctl_table ip_ct_net_table[] = { | |||
776 | { | 776 | { |
777 | .ctl_name = CTL_NET, | 777 | .ctl_name = CTL_NET, |
778 | .procname = "net", | 778 | .procname = "net", |
779 | .mode = 0555, | 779 | .mode = 0555, |
780 | .child = ip_ct_ipv4_table, | 780 | .child = ip_ct_ipv4_table, |
781 | }, | 781 | }, |
782 | { .ctl_name = 0 } | 782 | { .ctl_name = 0 } |
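
ip_conntrack_defrag() above reassembles fragments before connection tracking ever sees them; the trigger is simply whether the more-fragments flag or a nonzero fragment offset is set in the IP header. A minimal check of that condition, assuming the usual IP_MF/IP_OFFSET values:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define IP_OFFSET 0x1FFF   /* fragment offset mask    */
#define IP_MF     0x2000   /* "more fragments" flag   */

/* frag_off as it appears on the wire (network byte order). */
static int needs_defrag(uint16_t frag_off_be)
{
    return (frag_off_be & htons(IP_MF | IP_OFFSET)) != 0;
}

int main(void)
{
    printf("%d\n", needs_defrag(htons(0)));       /* 0: not fragmented  */
    printf("%d\n", needs_defrag(htons(IP_MF)));   /* 1: first fragment  */
    printf("%d\n", needs_defrag(htons(185)));     /* 1: later fragment  */
    printf("%d\n", needs_defrag(htons(0x4000)));  /* 0: only DF bit set */
    return 0;
}
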
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c index ef56de2eff0c..76e175e7a972 100644 --- a/net/ipv4/netfilter/ip_conntrack_tftp.c +++ b/net/ipv4/netfilter/ip_conntrack_tftp.c | |||
@@ -33,7 +33,7 @@ MODULE_PARM_DESC(ports, "port numbers of tftp servers"); | |||
33 | 33 | ||
34 | #if 0 | 34 | #if 0 |
35 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ | 35 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ |
36 | __FILE__, __FUNCTION__ , ## args) | 36 | __FILE__, __FUNCTION__ , ## args) |
37 | #else | 37 | #else |
38 | #define DEBUGP(format, args...) | 38 | #define DEBUGP(format, args...) |
39 | #endif | 39 | #endif |
@@ -113,7 +113,7 @@ static void ip_conntrack_tftp_fini(void) | |||
113 | DEBUGP("unregistering helper for port %d\n", | 113 | DEBUGP("unregistering helper for port %d\n", |
114 | ports[i]); | 114 | ports[i]); |
115 | ip_conntrack_helper_unregister(&tftp[i]); | 115 | ip_conntrack_helper_unregister(&tftp[i]); |
116 | } | 116 | } |
117 | } | 117 | } |
118 | 118 | ||
119 | static int __init ip_conntrack_tftp_init(void) | 119 | static int __init ip_conntrack_tftp_init(void) |
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index 5e08c2bf887d..275a4d3faf0a 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c | |||
@@ -120,7 +120,7 @@ static int | |||
120 | in_range(const struct ip_conntrack_tuple *tuple, | 120 | in_range(const struct ip_conntrack_tuple *tuple, |
121 | const struct ip_nat_range *range) | 121 | const struct ip_nat_range *range) |
122 | { | 122 | { |
123 | struct ip_nat_protocol *proto = | 123 | struct ip_nat_protocol *proto = |
124 | __ip_nat_proto_find(tuple->dst.protonum); | 124 | __ip_nat_proto_find(tuple->dst.protonum); |
125 | 125 | ||
126 | /* If we are supposed to map IPs, then we must be in the | 126 | /* If we are supposed to map IPs, then we must be in the |
@@ -443,8 +443,8 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, | |||
443 | (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); | 443 | (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); |
444 | 444 | ||
445 | /* Redirects on non-null nats must be dropped, else they'll | 445 | /* Redirects on non-null nats must be dropped, else they'll |
446 | start talking to each other without our translation, and be | 446 | start talking to each other without our translation, and be |
447 | confused... --RR */ | 447 | confused... --RR */ |
448 | if (inside->icmp.type == ICMP_REDIRECT) { | 448 | if (inside->icmp.type == ICMP_REDIRECT) { |
449 | /* If NAT isn't finished, assume it and drop. */ | 449 | /* If NAT isn't finished, assume it and drop. */ |
450 | if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) | 450 | if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) |
@@ -458,8 +458,8 @@ int ip_nat_icmp_reply_translation(struct ip_conntrack *ct, | |||
458 | *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); | 458 | *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY"); |
459 | 459 | ||
460 | if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 + | 460 | if (!ip_ct_get_tuple(&inside->ip, *pskb, (*pskb)->nh.iph->ihl*4 + |
461 | sizeof(struct icmphdr) + inside->ip.ihl*4, | 461 | sizeof(struct icmphdr) + inside->ip.ihl*4, |
462 | &inner, | 462 | &inner, |
463 | __ip_conntrack_proto_find(inside->ip.protocol))) | 463 | __ip_conntrack_proto_find(inside->ip.protocol))) |
464 | return 0; | 464 | return 0; |
465 | 465 | ||
@@ -537,7 +537,7 @@ EXPORT_SYMBOL(ip_nat_protocol_unregister); | |||
537 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ | 537 | #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ |
538 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) | 538 | defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) |
539 | int | 539 | int |
540 | ip_nat_port_range_to_nfattr(struct sk_buff *skb, | 540 | ip_nat_port_range_to_nfattr(struct sk_buff *skb, |
541 | const struct ip_nat_range *range) | 541 | const struct ip_nat_range *range) |
542 | { | 542 | { |
543 | NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), | 543 | NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), |
@@ -555,21 +555,21 @@ int | |||
555 | ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range) | 555 | ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range) |
556 | { | 556 | { |
557 | int ret = 0; | 557 | int ret = 0; |
558 | 558 | ||
559 | /* we have to return whether we actually parsed something or not */ | 559 | /* we have to return whether we actually parsed something or not */ |
560 | 560 | ||
561 | if (tb[CTA_PROTONAT_PORT_MIN-1]) { | 561 | if (tb[CTA_PROTONAT_PORT_MIN-1]) { |
562 | ret = 1; | 562 | ret = 1; |
563 | range->min.tcp.port = | 563 | range->min.tcp.port = |
564 | *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]); | 564 | *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]); |
565 | } | 565 | } |
566 | 566 | ||
567 | if (!tb[CTA_PROTONAT_PORT_MAX-1]) { | 567 | if (!tb[CTA_PROTONAT_PORT_MAX-1]) { |
568 | if (ret) | 568 | if (ret) |
569 | range->max.tcp.port = range->min.tcp.port; | 569 | range->max.tcp.port = range->min.tcp.port; |
570 | } else { | 570 | } else { |
571 | ret = 1; | 571 | ret = 1; |
572 | range->max.tcp.port = | 572 | range->max.tcp.port = |
573 | *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]); | 573 | *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]); |
574 | } | 574 | } |
575 | 575 | ||
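
The last ip_nat_core.c hunk converts netlink attributes back into a port range, and when only the minimum attribute is present the maximum is collapsed onto it. A compact sketch of that defaulting rule, with plain flags standing in for attribute presence (no netlink dependency):

#include <stdint.h>
#include <stdio.h>

struct port_range { uint16_t min, max; };

/* have_min/have_max stand in for "the netlink attribute was present". */
static int range_from_attrs(struct port_range *r,
                            int have_min, uint16_t min,
                            int have_max, uint16_t max)
{
    int parsed = 0;

    if (have_min) {
        parsed = 1;
        r->min = min;
    }
    if (!have_max) {
        if (parsed)
            r->max = r->min;   /* single port: collapse the range */
    } else {
        parsed = 1;
        r->max = max;
    }
    return parsed;             /* whether anything was parsed at all */
}

int main(void)
{
    struct port_range r = { 0, 0 };
    printf("%d %hu-%hu\n", range_from_attrs(&r, 1, 8080, 0, 0), r.min, r.max);
    /* prints: 1 8080-8080 */
    return 0;
}
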
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c index 913960e1380f..32e01d8dffcb 100644 --- a/net/ipv4/netfilter/ip_nat_ftp.c +++ b/net/ipv4/netfilter/ip_nat_ftp.c | |||
@@ -50,7 +50,7 @@ mangle_rfc959_packet(struct sk_buff **pskb, | |||
50 | DEBUGP("calling ip_nat_mangle_tcp_packet\n"); | 50 | DEBUGP("calling ip_nat_mangle_tcp_packet\n"); |
51 | 51 | ||
52 | *seq += strlen(buffer) - matchlen; | 52 | *seq += strlen(buffer) - matchlen; |
53 | return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 53 | return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, |
54 | matchlen, buffer, strlen(buffer)); | 54 | matchlen, buffer, strlen(buffer)); |
55 | } | 55 | } |
56 | 56 | ||
@@ -72,7 +72,7 @@ mangle_eprt_packet(struct sk_buff **pskb, | |||
72 | DEBUGP("calling ip_nat_mangle_tcp_packet\n"); | 72 | DEBUGP("calling ip_nat_mangle_tcp_packet\n"); |
73 | 73 | ||
74 | *seq += strlen(buffer) - matchlen; | 74 | *seq += strlen(buffer) - matchlen; |
75 | return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 75 | return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, |
76 | matchlen, buffer, strlen(buffer)); | 76 | matchlen, buffer, strlen(buffer)); |
77 | } | 77 | } |
78 | 78 | ||
@@ -94,7 +94,7 @@ mangle_epsv_packet(struct sk_buff **pskb, | |||
94 | DEBUGP("calling ip_nat_mangle_tcp_packet\n"); | 94 | DEBUGP("calling ip_nat_mangle_tcp_packet\n"); |
95 | 95 | ||
96 | *seq += strlen(buffer) - matchlen; | 96 | *seq += strlen(buffer) - matchlen; |
97 | return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, | 97 | return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff, |
98 | matchlen, buffer, strlen(buffer)); | 98 | matchlen, buffer, strlen(buffer)); |
99 | } | 99 | } |
100 | 100 | ||
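
The three manglers above rewrite the FTP payload with the NATed endpoint; for the classic RFC 959 PORT/227 form the address and port are flattened into six comma-separated numbers before ip_nat_mangle_tcp_packet() splices them in. A quick sketch of producing that string (sample address and port only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ip = 0xC0000202;     /* 192.0.2.2, host byte order */
    uint16_t port = 50123;
    char buffer[64];

    /* RFC 959 address format: h1,h2,h3,h4,p1,p2 */
    int len = snprintf(buffer, sizeof(buffer), "%u,%u,%u,%u,%u,%u",
                       (unsigned)((ip >> 24) & 0xFF), (unsigned)((ip >> 16) & 0xFF),
                       (unsigned)((ip >> 8) & 0xFF), (unsigned)(ip & 0xFF),
                       (unsigned)(port >> 8), (unsigned)(port & 0xFF));

    printf("%s (len=%d)\n", buffer, len);  /* 192,0,2,2,195,203 */
    return 0;
}
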
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index 2e5c4bc52a60..dc778cfef58b 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* ip_nat_helper.c - generic support functions for NAT helpers | 1 | /* ip_nat_helper.c - generic support functions for NAT helpers |
2 | * | 2 | * |
3 | * (C) 2000-2002 Harald Welte <laforge@netfilter.org> | 3 | * (C) 2000-2002 Harald Welte <laforge@netfilter.org> |
4 | * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org> | 4 | * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org> |
@@ -8,7 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>: | 10 | * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>: |
11 | * - add support for SACK adjustment | 11 | * - add support for SACK adjustment |
12 | * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>: | 12 | * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>: |
13 | * - merge SACK support into newnat API | 13 | * - merge SACK support into newnat API |
14 | * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>: | 14 | * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>: |
@@ -45,10 +45,10 @@ | |||
45 | static DEFINE_SPINLOCK(ip_nat_seqofs_lock); | 45 | static DEFINE_SPINLOCK(ip_nat_seqofs_lock); |
46 | 46 | ||
47 | /* Setup TCP sequence correction given this change at this sequence */ | 47 | /* Setup TCP sequence correction given this change at this sequence */ |
48 | static inline void | 48 | static inline void |
49 | adjust_tcp_sequence(u32 seq, | 49 | adjust_tcp_sequence(u32 seq, |
50 | int sizediff, | 50 | int sizediff, |
51 | struct ip_conntrack *ct, | 51 | struct ip_conntrack *ct, |
52 | enum ip_conntrack_info ctinfo) | 52 | enum ip_conntrack_info ctinfo) |
53 | { | 53 | { |
54 | int dir; | 54 | int dir; |
@@ -150,7 +150,7 @@ static int enlarge_skb(struct sk_buff **pskb, unsigned int extra) | |||
150 | * skb enlargement, ... | 150 | * skb enlargement, ... |
151 | * | 151 | * |
152 | * */ | 152 | * */ |
153 | int | 153 | int |
154 | ip_nat_mangle_tcp_packet(struct sk_buff **pskb, | 154 | ip_nat_mangle_tcp_packet(struct sk_buff **pskb, |
155 | struct ip_conntrack *ct, | 155 | struct ip_conntrack *ct, |
156 | enum ip_conntrack_info ctinfo, | 156 | enum ip_conntrack_info ctinfo, |
@@ -186,7 +186,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
186 | tcph->check = tcp_v4_check(datalen, | 186 | tcph->check = tcp_v4_check(datalen, |
187 | iph->saddr, iph->daddr, | 187 | iph->saddr, iph->daddr, |
188 | csum_partial((char *)tcph, | 188 | csum_partial((char *)tcph, |
189 | datalen, 0)); | 189 | datalen, 0)); |
190 | } else | 190 | } else |
191 | nf_proto_csum_replace2(&tcph->check, *pskb, | 191 | nf_proto_csum_replace2(&tcph->check, *pskb, |
192 | htons(oldlen), htons(datalen), 1); | 192 | htons(oldlen), htons(datalen), 1); |
@@ -202,7 +202,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
202 | return 1; | 202 | return 1; |
203 | } | 203 | } |
204 | EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); | 204 | EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); |
205 | 205 | ||
206 | /* Generic function for mangling variable-length address changes inside | 206 | /* Generic function for mangling variable-length address changes inside |
207 | * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX | 207 | * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX |
208 | * command in the Amanda protocol) | 208 | * command in the Amanda protocol) |
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); | |||
213 | * XXX - This function could be merged with ip_nat_mangle_tcp_packet which | 213 | * XXX - This function could be merged with ip_nat_mangle_tcp_packet which |
214 | * should be fairly easy to do. | 214 | * should be fairly easy to do. |
215 | */ | 215 | */ |
216 | int | 216 | int |
217 | ip_nat_mangle_udp_packet(struct sk_buff **pskb, | 217 | ip_nat_mangle_udp_packet(struct sk_buff **pskb, |
218 | struct ip_conntrack *ct, | 218 | struct ip_conntrack *ct, |
219 | enum ip_conntrack_info ctinfo, | 219 | enum ip_conntrack_info ctinfo, |
@@ -228,8 +228,8 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
228 | 228 | ||
229 | /* UDP helpers might accidentally mangle the wrong packet */ | 229 | /* UDP helpers might accidentally mangle the wrong packet */ |
230 | iph = (*pskb)->nh.iph; | 230 | iph = (*pskb)->nh.iph; |
231 | if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + | 231 | if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + |
232 | match_offset + match_len) | 232 | match_offset + match_len) |
233 | return 0; | 233 | return 0; |
234 | 234 | ||
235 | if (!skb_make_writable(pskb, (*pskb)->len)) | 235 | if (!skb_make_writable(pskb, (*pskb)->len)) |
@@ -258,9 +258,9 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
258 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 258 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { |
259 | udph->check = 0; | 259 | udph->check = 0; |
260 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, | 260 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, |
261 | datalen, IPPROTO_UDP, | 261 | datalen, IPPROTO_UDP, |
262 | csum_partial((char *)udph, | 262 | csum_partial((char *)udph, |
263 | datalen, 0)); | 263 | datalen, 0)); |
264 | if (!udph->check) | 264 | if (!udph->check) |
265 | udph->check = CSUM_MANGLED_0; | 265 | udph->check = CSUM_MANGLED_0; |
266 | } else | 266 | } else |
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(ip_nat_mangle_udp_packet); | |||
273 | /* Adjust one found SACK option including checksum correction */ | 273 | /* Adjust one found SACK option including checksum correction */ |
274 | static void | 274 | static void |
275 | sack_adjust(struct sk_buff *skb, | 275 | sack_adjust(struct sk_buff *skb, |
276 | struct tcphdr *tcph, | 276 | struct tcphdr *tcph, |
277 | unsigned int sackoff, | 277 | unsigned int sackoff, |
278 | unsigned int sackend, | 278 | unsigned int sackend, |
279 | struct ip_nat_seq *natseq) | 279 | struct ip_nat_seq *natseq) |
@@ -360,14 +360,14 @@ ip_nat_sack_adjust(struct sk_buff **pskb, | |||
360 | 360 | ||
361 | /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ | 361 | /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */ |
362 | int | 362 | int |
363 | ip_nat_seq_adjust(struct sk_buff **pskb, | 363 | ip_nat_seq_adjust(struct sk_buff **pskb, |
364 | struct ip_conntrack *ct, | 364 | struct ip_conntrack *ct, |
365 | enum ip_conntrack_info ctinfo) | 365 | enum ip_conntrack_info ctinfo) |
366 | { | 366 | { |
367 | struct tcphdr *tcph; | 367 | struct tcphdr *tcph; |
368 | int dir; | 368 | int dir; |
369 | __be32 newseq, newack; | 369 | __be32 newseq, newack; |
370 | struct ip_nat_seq *this_way, *other_way; | 370 | struct ip_nat_seq *this_way, *other_way; |
371 | 371 | ||
372 | dir = CTINFO2DIR(ctinfo); | 372 | dir = CTINFO2DIR(ctinfo); |
373 | 373 | ||
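
adjust_tcp_sequence() and ip_nat_seq_adjust() above keep per-direction corrections so that, once a helper has grown or shrunk the payload, later sequence and ack numbers can be shifted by the right amount. The model below is simplified and its field names were chosen for the sketch; the kernel's struct ip_nat_seq carries more state.

#include <stdint.h>
#include <stdio.h>

/* One direction's correction: segments whose sequence number is past
 * correction_pos were sent after the resize and need offset_after;
 * earlier ones need offset_before (usually 0 for the first change). */
struct seq_fixup {
    uint32_t correction_pos;
    int32_t  offset_before;
    int32_t  offset_after;
};

static uint32_t fix_seq(const struct seq_fixup *f, uint32_t seq)
{
    return seq + ((int32_t)(seq - f->correction_pos) > 0
                  ? f->offset_after : f->offset_before);
}

int main(void)
{
    /* A helper rewrote a command at sequence 1000 and made it 6 bytes longer. */
    struct seq_fixup f = { .correction_pos = 1000,
                           .offset_before = 0, .offset_after = 6 };

    printf("%u\n", (unsigned)fix_seq(&f, 900));   /* before the change: 900  */
    printf("%u\n", (unsigned)fix_seq(&f, 1500));  /* after the change:  1506 */
    return 0;
}
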
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c index ec957bbb5366..24ce4a5023d7 100644 --- a/net/ipv4/netfilter/ip_nat_helper_pptp.c +++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c | |||
@@ -202,10 +202,10 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
202 | 202 | ||
203 | /* mangle packet */ | 203 | /* mangle packet */ |
204 | if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 204 | if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, |
205 | cid_off + sizeof(struct pptp_pkt_hdr) + | 205 | cid_off + sizeof(struct pptp_pkt_hdr) + |
206 | sizeof(struct PptpControlHeader), | 206 | sizeof(struct PptpControlHeader), |
207 | sizeof(new_callid), (char *)&new_callid, | 207 | sizeof(new_callid), (char *)&new_callid, |
208 | sizeof(new_callid)) == 0) | 208 | sizeof(new_callid)) == 0) |
209 | return NF_DROP; | 209 | return NF_DROP; |
210 | 210 | ||
211 | return NF_ACCEPT; | 211 | return NF_ACCEPT; |
@@ -293,7 +293,7 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
293 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); | 293 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); |
294 | 294 | ||
295 | if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 295 | if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, |
296 | pcid_off + sizeof(struct pptp_pkt_hdr) + | 296 | pcid_off + sizeof(struct pptp_pkt_hdr) + |
297 | sizeof(struct PptpControlHeader), | 297 | sizeof(struct PptpControlHeader), |
298 | sizeof(new_pcid), (char *)&new_pcid, | 298 | sizeof(new_pcid), (char *)&new_pcid, |
299 | sizeof(new_pcid)) == 0) | 299 | sizeof(new_pcid)) == 0) |
diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c index feb26b48f1d5..cfaeea38314f 100644 --- a/net/ipv4/netfilter/ip_nat_irc.c +++ b/net/ipv4/netfilter/ip_nat_irc.c | |||
@@ -88,8 +88,8 @@ static unsigned int help(struct sk_buff **pskb, | |||
88 | DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n", | 88 | DEBUGP("ip_nat_irc: Inserting '%s' == %u.%u.%u.%u, port %u\n", |
89 | buffer, NIPQUAD(exp->tuple.src.ip), port); | 89 | buffer, NIPQUAD(exp->tuple.src.ip), port); |
90 | 90 | ||
91 | ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, | 91 | ret = ip_nat_mangle_tcp_packet(pskb, exp->master, ctinfo, |
92 | matchoff, matchlen, buffer, | 92 | matchoff, matchlen, buffer, |
93 | strlen(buffer)); | 93 | strlen(buffer)); |
94 | if (ret != NF_ACCEPT) | 94 | if (ret != NF_ACCEPT) |
95 | ip_conntrack_unexpect_related(exp); | 95 | ip_conntrack_unexpect_related(exp); |
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c index fb716edd5bc6..22a528ae0380 100644 --- a/net/ipv4/netfilter/ip_nat_proto_icmp.c +++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c | |||
@@ -45,7 +45,7 @@ icmp_unique_tuple(struct ip_conntrack_tuple *tuple, | |||
45 | 45 | ||
46 | for (i = 0; i < range_size; i++, id++) { | 46 | for (i = 0; i < range_size; i++, id++) { |
47 | tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + | 47 | tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + |
48 | (id % range_size)); | 48 | (id % range_size)); |
49 | if (!ip_nat_used_tuple(tuple, conntrack)) | 49 | if (!ip_nat_used_tuple(tuple, conntrack)) |
50 | return 1; | 50 | return 1; |
51 | } | 51 | } |
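
icmp_unique_tuple() above walks the configured id range looking for an ICMP query id that is not already claimed by another NAT binding. The sketch below mirrors that search loop; the id_in_use() predicate is a stand-in for ip_nat_used_tuple().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for ip_nat_used_tuple(): pretend ids 100 and 101 are taken. */
static bool id_in_use(uint16_t id)
{
    return id == 100 || id == 101;
}

/* Try each id in [min, min+range_size) starting from a hint, wrapping
 * around inside the range, and return the first free one (or -1). */
static int pick_icmp_id(uint16_t min, uint16_t range_size, uint16_t hint)
{
    for (uint16_t i = 0, id = hint; i < range_size; i++, id++) {
        uint16_t candidate = min + (id % range_size);
        if (!id_in_use(candidate))
            return candidate;
    }
    return -1;
}

int main(void)
{
    printf("%d\n", pick_icmp_id(100, 8, 0));  /* 102: first free id */
    return 0;
}
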
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c index e1c8a05f3dc6..080eb1d92200 100644 --- a/net/ipv4/netfilter/ip_nat_rule.c +++ b/net/ipv4/netfilter/ip_nat_rule.c | |||
@@ -112,7 +112,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb, | |||
112 | 112 | ||
113 | /* Connection must be valid and new. */ | 113 | /* Connection must be valid and new. */ |
114 | IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED | 114 | IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED |
115 | || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); | 115 | || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); |
116 | IP_NF_ASSERT(out); | 116 | IP_NF_ASSERT(out); |
117 | 117 | ||
118 | return ip_nat_setup_info(ct, &mr->range[0], hooknum); | 118 | return ip_nat_setup_info(ct, &mr->range[0], hooknum); |
@@ -223,8 +223,8 @@ alloc_null_binding(struct ip_conntrack *conntrack, | |||
223 | 223 | ||
224 | unsigned int | 224 | unsigned int |
225 | alloc_null_binding_confirmed(struct ip_conntrack *conntrack, | 225 | alloc_null_binding_confirmed(struct ip_conntrack *conntrack, |
226 | struct ip_nat_info *info, | 226 | struct ip_nat_info *info, |
227 | unsigned int hooknum) | 227 | unsigned int hooknum) |
228 | { | 228 | { |
229 | __be32 ip | 229 | __be32 ip |
230 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC | 230 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC |
diff --git a/net/ipv4/netfilter/ip_nat_sip.c b/net/ipv4/netfilter/ip_nat_sip.c index 6223abc924ff..325c5a9dc2ef 100644 --- a/net/ipv4/netfilter/ip_nat_sip.c +++ b/net/ipv4/netfilter/ip_nat_sip.c | |||
@@ -88,7 +88,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, | |||
88 | return 1; | 88 | return 1; |
89 | 89 | ||
90 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, | 90 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, |
91 | matchoff, matchlen, addr, addrlen)) | 91 | matchoff, matchlen, addr, addrlen)) |
92 | return 0; | 92 | return 0; |
93 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | 93 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); |
94 | return 1; | 94 | return 1; |
@@ -149,7 +149,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb, | |||
149 | return 0; | 149 | return 0; |
150 | 150 | ||
151 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, | 151 | if (!ip_nat_mangle_udp_packet(pskb, ct, ctinfo, |
152 | matchoff, matchlen, buffer, bufflen)) | 152 | matchoff, matchlen, buffer, bufflen)) |
153 | return 0; | 153 | return 0; |
154 | 154 | ||
155 | /* We need to reload this. Thanks Patrick. */ | 155 | /* We need to reload this. Thanks Patrick. */ |
@@ -170,7 +170,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
170 | 170 | ||
171 | /* Get actual SDP length */ | 171 | /* Get actual SDP length */ |
172 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, | 172 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, |
173 | &matchlen, POS_SDP_HEADER) > 0) { | 173 | &matchlen, POS_SDP_HEADER) > 0) { |
174 | 174 | ||
175 | /* since ct_sip_get_info() gives us a pointer past 'v=', | 175 | /* since ct_sip_get_info() gives us a pointer past 'v=', |
176 | we need to add 2 bytes to this count. */ | 176 | we need to add 2 bytes to this count. */ |
@@ -178,7 +178,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
178 | 178 | ||
179 | /* Now, update SDP length */ | 179 | /* Now, update SDP length */ |
180 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, | 180 | if (ct_sip_get_info(dptr, (*pskb)->len - dataoff, &matchoff, |
181 | &matchlen, POS_CONTENT) > 0) { | 181 | &matchlen, POS_CONTENT) > 0) { |
182 | 182 | ||
183 | bufflen = sprintf(buffer, "%u", c_len); | 183 | bufflen = sprintf(buffer, "%u", c_len); |
184 | 184 | ||
@@ -204,17 +204,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb, | |||
204 | /* Mangle owner and contact info. */ | 204 | /* Mangle owner and contact info. */ |
205 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); | 205 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); |
206 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 206 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
207 | buffer, bufflen, POS_OWNER)) | 207 | buffer, bufflen, POS_OWNER)) |
208 | return 0; | 208 | return 0; |
209 | 209 | ||
210 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 210 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
211 | buffer, bufflen, POS_CONNECTION)) | 211 | buffer, bufflen, POS_CONNECTION)) |
212 | return 0; | 212 | return 0; |
213 | 213 | ||
214 | /* Mangle media port. */ | 214 | /* Mangle media port. */ |
215 | bufflen = sprintf(buffer, "%u", port); | 215 | bufflen = sprintf(buffer, "%u", port); |
216 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 216 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
217 | buffer, bufflen, POS_MEDIA)) | 217 | buffer, bufflen, POS_MEDIA)) |
218 | return 0; | 218 | return 0; |
219 | 219 | ||
220 | return mangle_content_len(pskb, ctinfo, ct, dptr); | 220 | return mangle_content_len(pskb, ctinfo, ct, dptr); |
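
mangle_sdp() rewrites the owner/connection addresses and the media port, then calls mangle_content_len() to re-derive the SDP body length and patch the length header, since the rewritten strings rarely have the same length as the originals. A toy illustration of why that recomputation is needed (addresses and ports are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* SDP body before and after NAT rewrites the connection address and port. */
    const char *body_before = "v=0\r\nc=IN IP4 10.0.0.1\r\nm=audio 5060 RTP/AVP 0\r\n";
    const char *body_after  = "v=0\r\nc=IN IP4 192.0.2.10\r\nm=audio 49170 RTP/AVP 0\r\n";

    /* The advertised body length must track the rewritten body size,
     * which is what mangle_content_len() re-derives. */
    printf("Content-Length: %zu -> %zu\n",
           strlen(body_before), strlen(body_after));
    return 0;
}
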
diff --git a/net/ipv4/netfilter/ip_nat_snmp_basic.c b/net/ipv4/netfilter/ip_nat_snmp_basic.c index c3d9f3b090c4..e41d0efae515 100644 --- a/net/ipv4/netfilter/ip_nat_snmp_basic.c +++ b/net/ipv4/netfilter/ip_nat_snmp_basic.c | |||
@@ -3,11 +3,11 @@ | |||
3 | * | 3 | * |
4 | * Basic SNMP Application Layer Gateway | 4 | * Basic SNMP Application Layer Gateway |
5 | * | 5 | * |
6 | * This IP NAT module is intended for use with SNMP network | 6 | * This IP NAT module is intended for use with SNMP network |
7 | * discovery and monitoring applications where target networks use | 7 | * discovery and monitoring applications where target networks use |
8 | * conflicting private address realms. | 8 | * conflicting private address realms. |
9 | * | 9 | * |
10 | * Static NAT is used to remap the networks from the view of the network | 10 | * Static NAT is used to remap the networks from the view of the network |
11 | * management system at the IP layer, and this module remaps some application | 11 | * management system at the IP layer, and this module remaps some application |
12 | * layer addresses to match. | 12 | * layer addresses to match. |
13 | * | 13 | * |
@@ -20,7 +20,7 @@ | |||
20 | * More information on ALG and associated issues can be found in | 20 | * More information on ALG and associated issues can be found in |
21 | * RFC 2962 | 21 | * RFC 2962 |
22 | * | 22 | * |
23 | * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory | 23 | * The ASN.1/BER parsing code is derived from the gxsnmp package by Gregory |
24 | * McLean & Jochen Friedrich, stripped down for use in the kernel. | 24 | * McLean & Jochen Friedrich, stripped down for use in the kernel. |
25 | * | 25 | * |
26 | * Copyright (c) 2000 RP Internet (www.rpi.net.au). | 26 | * Copyright (c) 2000 RP Internet (www.rpi.net.au). |
@@ -69,8 +69,8 @@ MODULE_DESCRIPTION("Basic SNMP Application Layer Gateway"); | |||
69 | static int debug; | 69 | static int debug; |
70 | static DEFINE_SPINLOCK(snmp_lock); | 70 | static DEFINE_SPINLOCK(snmp_lock); |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * Application layer address mapping mimics the NAT mapping, but | 73 | * Application layer address mapping mimics the NAT mapping, but |
74 | * only for the first octet in this case (a more flexible system | 74 | * only for the first octet in this case (a more flexible system |
75 | * can be implemented if needed). | 75 | * can be implemented if needed). |
76 | */ | 76 | */ |
@@ -80,7 +80,7 @@ struct oct1_map | |||
80 | u_int8_t to; | 80 | u_int8_t to; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | 83 | ||
84 | /***************************************************************************** | 84 | /***************************************************************************** |
85 | * | 85 | * |
86 | * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) | 86 | * Basic ASN.1 decoding routines (gxsnmp author Dirk Wisse) |
@@ -129,7 +129,7 @@ struct oct1_map | |||
129 | #define ASN1_ERR_DEC_LENGTH_MISMATCH 4 | 129 | #define ASN1_ERR_DEC_LENGTH_MISMATCH 4 |
130 | #define ASN1_ERR_DEC_BADVALUE 5 | 130 | #define ASN1_ERR_DEC_BADVALUE 5 |
131 | 131 | ||
132 | /* | 132 | /* |
133 | * ASN.1 context. | 133 | * ASN.1 context. |
134 | */ | 134 | */ |
135 | struct asn1_ctx | 135 | struct asn1_ctx |
@@ -148,10 +148,10 @@ struct asn1_octstr | |||
148 | unsigned char *data; | 148 | unsigned char *data; |
149 | unsigned int len; | 149 | unsigned int len; |
150 | }; | 150 | }; |
151 | 151 | ||
152 | static void asn1_open(struct asn1_ctx *ctx, | 152 | static void asn1_open(struct asn1_ctx *ctx, |
153 | unsigned char *buf, | 153 | unsigned char *buf, |
154 | unsigned int len) | 154 | unsigned int len) |
155 | { | 155 | { |
156 | ctx->begin = buf; | 156 | ctx->begin = buf; |
157 | ctx->end = buf + len; | 157 | ctx->end = buf + len; |
@@ -172,9 +172,9 @@ static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) | |||
172 | static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) | 172 | static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) |
173 | { | 173 | { |
174 | unsigned char ch; | 174 | unsigned char ch; |
175 | 175 | ||
176 | *tag = 0; | 176 | *tag = 0; |
177 | 177 | ||
178 | do | 178 | do |
179 | { | 179 | { |
180 | if (!asn1_octet_decode(ctx, &ch)) | 180 | if (!asn1_octet_decode(ctx, &ch)) |
@@ -185,20 +185,20 @@ static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) | |||
185 | return 1; | 185 | return 1; |
186 | } | 186 | } |
187 | 187 | ||
188 | static unsigned char asn1_id_decode(struct asn1_ctx *ctx, | 188 | static unsigned char asn1_id_decode(struct asn1_ctx *ctx, |
189 | unsigned int *cls, | 189 | unsigned int *cls, |
190 | unsigned int *con, | 190 | unsigned int *con, |
191 | unsigned int *tag) | 191 | unsigned int *tag) |
192 | { | 192 | { |
193 | unsigned char ch; | 193 | unsigned char ch; |
194 | 194 | ||
195 | if (!asn1_octet_decode(ctx, &ch)) | 195 | if (!asn1_octet_decode(ctx, &ch)) |
196 | return 0; | 196 | return 0; |
197 | 197 | ||
198 | *cls = (ch & 0xC0) >> 6; | 198 | *cls = (ch & 0xC0) >> 6; |
199 | *con = (ch & 0x20) >> 5; | 199 | *con = (ch & 0x20) >> 5; |
200 | *tag = (ch & 0x1F); | 200 | *tag = (ch & 0x1F); |
201 | 201 | ||
202 | if (*tag == 0x1F) { | 202 | if (*tag == 0x1F) { |
203 | if (!asn1_tag_decode(ctx, tag)) | 203 | if (!asn1_tag_decode(ctx, tag)) |
204 | return 0; | 204 | return 0; |
@@ -207,25 +207,25 @@ static unsigned char asn1_id_decode(struct asn1_ctx *ctx, | |||
207 | } | 207 | } |
208 | 208 | ||
209 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, | 209 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, |
210 | unsigned int *def, | 210 | unsigned int *def, |
211 | unsigned int *len) | 211 | unsigned int *len) |
212 | { | 212 | { |
213 | unsigned char ch, cnt; | 213 | unsigned char ch, cnt; |
214 | 214 | ||
215 | if (!asn1_octet_decode(ctx, &ch)) | 215 | if (!asn1_octet_decode(ctx, &ch)) |
216 | return 0; | 216 | return 0; |
217 | 217 | ||
218 | if (ch == 0x80) | 218 | if (ch == 0x80) |
219 | *def = 0; | 219 | *def = 0; |
220 | else { | 220 | else { |
221 | *def = 1; | 221 | *def = 1; |
222 | 222 | ||
223 | if (ch < 0x80) | 223 | if (ch < 0x80) |
224 | *len = ch; | 224 | *len = ch; |
225 | else { | 225 | else { |
226 | cnt = (unsigned char) (ch & 0x7F); | 226 | cnt = (unsigned char) (ch & 0x7F); |
227 | *len = 0; | 227 | *len = 0; |
228 | 228 | ||
229 | while (cnt > 0) { | 229 | while (cnt > 0) { |
230 | if (!asn1_octet_decode(ctx, &ch)) | 230 | if (!asn1_octet_decode(ctx, &ch)) |
231 | return 0; | 231 | return 0; |
@@ -239,20 +239,20 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx, | |||
239 | } | 239 | } |
240 | 240 | ||
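
As an aside, the bit masks in asn1_id_decode() and the short/long length forms handled by asn1_length_decode() above follow the standard BER layout. A small userspace illustration with example octets (0x30 0x82 is the usual start of an SNMP message; the values are chosen for the example only):

#include <stdio.h>

int main(void)
{
	unsigned char id = 0x30, len0 = 0x82;

	unsigned cls = (id & 0xC0) >> 6;	/* 0 = UNIVERSAL  (ASN1_UNI) */
	unsigned con = (id & 0x20) >> 5;	/* 1 = constructed (ASN1_CON) */
	unsigned tag = (id & 0x1F);		/* 16 = SEQUENCE   (ASN1_SEQ) */

	printf("cls=%u con=%u tag=%u\n", cls, con, tag);

	if (len0 & 0x80)	/* long form: low 7 bits count the length octets that follow */
		printf("long-form length, %u length octets follow\n", len0 & 0x7F);
	else			/* short form: the octet itself is the length */
		printf("short-form length %u\n", len0);
	return 0;
}
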
241 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, | 241 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, |
242 | unsigned char **eoc, | 242 | unsigned char **eoc, |
243 | unsigned int *cls, | 243 | unsigned int *cls, |
244 | unsigned int *con, | 244 | unsigned int *con, |
245 | unsigned int *tag) | 245 | unsigned int *tag) |
246 | { | 246 | { |
247 | unsigned int def, len; | 247 | unsigned int def, len; |
248 | 248 | ||
249 | if (!asn1_id_decode(ctx, cls, con, tag)) | 249 | if (!asn1_id_decode(ctx, cls, con, tag)) |
250 | return 0; | 250 | return 0; |
251 | 251 | ||
252 | def = len = 0; | 252 | def = len = 0; |
253 | if (!asn1_length_decode(ctx, &def, &len)) | 253 | if (!asn1_length_decode(ctx, &def, &len)) |
254 | return 0; | 254 | return 0; |
255 | 255 | ||
256 | if (def) | 256 | if (def) |
257 | *eoc = ctx->pointer + len; | 257 | *eoc = ctx->pointer + len; |
258 | else | 258 | else |
@@ -263,19 +263,19 @@ static unsigned char asn1_header_decode(struct asn1_ctx *ctx, | |||
263 | static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) | 263 | static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) |
264 | { | 264 | { |
265 | unsigned char ch; | 265 | unsigned char ch; |
266 | 266 | ||
267 | if (eoc == 0) { | 267 | if (eoc == 0) { |
268 | if (!asn1_octet_decode(ctx, &ch)) | 268 | if (!asn1_octet_decode(ctx, &ch)) |
269 | return 0; | 269 | return 0; |
270 | 270 | ||
271 | if (ch != 0x00) { | 271 | if (ch != 0x00) { |
272 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; | 272 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; |
273 | return 0; | 273 | return 0; |
274 | } | 274 | } |
275 | 275 | ||
276 | if (!asn1_octet_decode(ctx, &ch)) | 276 | if (!asn1_octet_decode(ctx, &ch)) |
277 | return 0; | 277 | return 0; |
278 | 278 | ||
279 | if (ch != 0x00) { | 279 | if (ch != 0x00) { |
280 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; | 280 | ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; |
281 | return 0; | 281 | return 0; |
@@ -297,27 +297,27 @@ static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc) | |||
297 | } | 297 | } |
298 | 298 | ||
299 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, | 299 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, |
300 | unsigned char *eoc, | 300 | unsigned char *eoc, |
301 | long *integer) | 301 | long *integer) |
302 | { | 302 | { |
303 | unsigned char ch; | 303 | unsigned char ch; |
304 | unsigned int len; | 304 | unsigned int len; |
305 | 305 | ||
306 | if (!asn1_octet_decode(ctx, &ch)) | 306 | if (!asn1_octet_decode(ctx, &ch)) |
307 | return 0; | 307 | return 0; |
308 | 308 | ||
309 | *integer = (signed char) ch; | 309 | *integer = (signed char) ch; |
310 | len = 1; | 310 | len = 1; |
311 | 311 | ||
312 | while (ctx->pointer < eoc) { | 312 | while (ctx->pointer < eoc) { |
313 | if (++len > sizeof (long)) { | 313 | if (++len > sizeof (long)) { |
314 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 314 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
315 | return 0; | 315 | return 0; |
316 | } | 316 | } |
317 | 317 | ||
318 | if (!asn1_octet_decode(ctx, &ch)) | 318 | if (!asn1_octet_decode(ctx, &ch)) |
319 | return 0; | 319 | return 0; |
320 | 320 | ||
321 | *integer <<= 8; | 321 | *integer <<= 8; |
322 | *integer |= ch; | 322 | *integer |= ch; |
323 | } | 323 | } |
@@ -325,28 +325,28 @@ static unsigned char asn1_long_decode(struct asn1_ctx *ctx, | |||
325 | } | 325 | } |
326 | 326 | ||
327 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, | 327 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, |
328 | unsigned char *eoc, | 328 | unsigned char *eoc, |
329 | unsigned int *integer) | 329 | unsigned int *integer) |
330 | { | 330 | { |
331 | unsigned char ch; | 331 | unsigned char ch; |
332 | unsigned int len; | 332 | unsigned int len; |
333 | 333 | ||
334 | if (!asn1_octet_decode(ctx, &ch)) | 334 | if (!asn1_octet_decode(ctx, &ch)) |
335 | return 0; | 335 | return 0; |
336 | 336 | ||
337 | *integer = ch; | 337 | *integer = ch; |
338 | if (ch == 0) len = 0; | 338 | if (ch == 0) len = 0; |
339 | else len = 1; | 339 | else len = 1; |
340 | 340 | ||
341 | while (ctx->pointer < eoc) { | 341 | while (ctx->pointer < eoc) { |
342 | if (++len > sizeof (unsigned int)) { | 342 | if (++len > sizeof (unsigned int)) { |
343 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 343 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
344 | return 0; | 344 | return 0; |
345 | } | 345 | } |
346 | 346 | ||
347 | if (!asn1_octet_decode(ctx, &ch)) | 347 | if (!asn1_octet_decode(ctx, &ch)) |
348 | return 0; | 348 | return 0; |
349 | 349 | ||
350 | *integer <<= 8; | 350 | *integer <<= 8; |
351 | *integer |= ch; | 351 | *integer |= ch; |
352 | } | 352 | } |
@@ -354,28 +354,28 @@ static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, | |||
354 | } | 354 | } |
355 | 355 | ||
356 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, | 356 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, |
357 | unsigned char *eoc, | 357 | unsigned char *eoc, |
358 | unsigned long *integer) | 358 | unsigned long *integer) |
359 | { | 359 | { |
360 | unsigned char ch; | 360 | unsigned char ch; |
361 | unsigned int len; | 361 | unsigned int len; |
362 | 362 | ||
363 | if (!asn1_octet_decode(ctx, &ch)) | 363 | if (!asn1_octet_decode(ctx, &ch)) |
364 | return 0; | 364 | return 0; |
365 | 365 | ||
366 | *integer = ch; | 366 | *integer = ch; |
367 | if (ch == 0) len = 0; | 367 | if (ch == 0) len = 0; |
368 | else len = 1; | 368 | else len = 1; |
369 | 369 | ||
370 | while (ctx->pointer < eoc) { | 370 | while (ctx->pointer < eoc) { |
371 | if (++len > sizeof (unsigned long)) { | 371 | if (++len > sizeof (unsigned long)) { |
372 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 372 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
373 | return 0; | 373 | return 0; |
374 | } | 374 | } |
375 | 375 | ||
376 | if (!asn1_octet_decode(ctx, &ch)) | 376 | if (!asn1_octet_decode(ctx, &ch)) |
377 | return 0; | 377 | return 0; |
378 | 378 | ||
379 | *integer <<= 8; | 379 | *integer <<= 8; |
380 | *integer |= ch; | 380 | *integer |= ch; |
381 | } | 381 | } |
@@ -383,21 +383,21 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, | |||
383 | } | 383 | } |
384 | 384 | ||
385 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | 385 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, |
386 | unsigned char *eoc, | 386 | unsigned char *eoc, |
387 | unsigned char **octets, | 387 | unsigned char **octets, |
388 | unsigned int *len) | 388 | unsigned int *len) |
389 | { | 389 | { |
390 | unsigned char *ptr; | 390 | unsigned char *ptr; |
391 | 391 | ||
392 | *len = 0; | 392 | *len = 0; |
393 | 393 | ||
394 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); | 394 | *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); |
395 | if (*octets == NULL) { | 395 | if (*octets == NULL) { |
396 | if (net_ratelimit()) | 396 | if (net_ratelimit()) |
397 | printk("OOM in bsalg (%d)\n", __LINE__); | 397 | printk("OOM in bsalg (%d)\n", __LINE__); |
398 | return 0; | 398 | return 0; |
399 | } | 399 | } |
400 | 400 | ||
401 | ptr = *octets; | 401 | ptr = *octets; |
402 | while (ctx->pointer < eoc) { | 402 | while (ctx->pointer < eoc) { |
403 | if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { | 403 | if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) { |
@@ -411,16 +411,16 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | |||
411 | } | 411 | } |
412 | 412 | ||
413 | static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, | 413 | static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, |
414 | unsigned long *subid) | 414 | unsigned long *subid) |
415 | { | 415 | { |
416 | unsigned char ch; | 416 | unsigned char ch; |
417 | 417 | ||
418 | *subid = 0; | 418 | *subid = 0; |
419 | 419 | ||
420 | do { | 420 | do { |
421 | if (!asn1_octet_decode(ctx, &ch)) | 421 | if (!asn1_octet_decode(ctx, &ch)) |
422 | return 0; | 422 | return 0; |
423 | 423 | ||
424 | *subid <<= 7; | 424 | *subid <<= 7; |
425 | *subid |= ch & 0x7F; | 425 | *subid |= ch & 0x7F; |
426 | } while ((ch & 0x80) == 0x80); | 426 | } while ((ch & 0x80) == 0x80); |
@@ -428,14 +428,14 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, | |||
428 | } | 428 | } |
429 | 429 | ||
430 | static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | 430 | static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, |
431 | unsigned char *eoc, | 431 | unsigned char *eoc, |
432 | unsigned long **oid, | 432 | unsigned long **oid, |
433 | unsigned int *len) | 433 | unsigned int *len) |
434 | { | 434 | { |
435 | unsigned long subid; | 435 | unsigned long subid; |
436 | unsigned int size; | 436 | unsigned int size; |
437 | unsigned long *optr; | 437 | unsigned long *optr; |
438 | 438 | ||
439 | size = eoc - ctx->pointer + 1; | 439 | size = eoc - ctx->pointer + 1; |
440 | *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); | 440 | *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); |
441 | if (*oid == NULL) { | 441 | if (*oid == NULL) { |
@@ -443,15 +443,15 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | |||
443 | printk("OOM in bsalg (%d)\n", __LINE__); | 443 | printk("OOM in bsalg (%d)\n", __LINE__); |
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | 446 | ||
447 | optr = *oid; | 447 | optr = *oid; |
448 | 448 | ||
449 | if (!asn1_subid_decode(ctx, &subid)) { | 449 | if (!asn1_subid_decode(ctx, &subid)) { |
450 | kfree(*oid); | 450 | kfree(*oid); |
451 | *oid = NULL; | 451 | *oid = NULL; |
452 | return 0; | 452 | return 0; |
453 | } | 453 | } |
454 | 454 | ||
455 | if (subid < 40) { | 455 | if (subid < 40) { |
456 | optr [0] = 0; | 456 | optr [0] = 0; |
457 | optr [1] = subid; | 457 | optr [1] = subid; |
@@ -462,10 +462,10 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | |||
462 | optr [0] = 2; | 462 | optr [0] = 2; |
463 | optr [1] = subid - 80; | 463 | optr [1] = subid - 80; |
464 | } | 464 | } |
465 | 465 | ||
466 | *len = 2; | 466 | *len = 2; |
467 | optr += 2; | 467 | optr += 2; |
468 | 468 | ||
469 | while (ctx->pointer < eoc) { | 469 | while (ctx->pointer < eoc) { |
470 | if (++(*len) > size) { | 470 | if (++(*len) > size) { |
471 | ctx->error = ASN1_ERR_DEC_BADVALUE; | 471 | ctx->error = ASN1_ERR_DEC_BADVALUE; |
@@ -473,7 +473,7 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | |||
473 | *oid = NULL; | 473 | *oid = NULL; |
474 | return 0; | 474 | return 0; |
475 | } | 475 | } |
476 | 476 | ||
477 | if (!asn1_subid_decode(ctx, optr++)) { | 477 | if (!asn1_subid_decode(ctx, optr++)) { |
478 | kfree(*oid); | 478 | kfree(*oid); |
479 | *oid = NULL; | 479 | *oid = NULL; |
@@ -611,9 +611,9 @@ struct snmp_v1_trap | |||
611 | #define SERR_EOM 2 | 611 | #define SERR_EOM 2 |
612 | 612 | ||
613 | static inline void mangle_address(unsigned char *begin, | 613 | static inline void mangle_address(unsigned char *begin, |
614 | unsigned char *addr, | 614 | unsigned char *addr, |
615 | const struct oct1_map *map, | 615 | const struct oct1_map *map, |
616 | __sum16 *check); | 616 | __sum16 *check); |
617 | struct snmp_cnv | 617 | struct snmp_cnv |
618 | { | 618 | { |
619 | unsigned int class; | 619 | unsigned int class; |
@@ -633,7 +633,7 @@ static struct snmp_cnv snmp_conv [] = | |||
633 | {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */ | 633 | {ASN1_APL, SNMP_GGE, SNMP_GAUGE}, /* Gauge32 == Unsigned32 */ |
634 | {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS}, | 634 | {ASN1_APL, SNMP_TIT, SNMP_TIMETICKS}, |
635 | {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE}, | 635 | {ASN1_APL, SNMP_OPQ, SNMP_OPAQUE}, |
636 | 636 | ||
637 | /* SNMPv2 data types and errors */ | 637 | /* SNMPv2 data types and errors */ |
638 | {ASN1_UNI, ASN1_BTS, SNMP_BITSTR}, | 638 | {ASN1_UNI, ASN1_BTS, SNMP_BITSTR}, |
639 | {ASN1_APL, SNMP_C64, SNMP_COUNTER64}, | 639 | {ASN1_APL, SNMP_C64, SNMP_COUNTER64}, |
@@ -644,13 +644,13 @@ static struct snmp_cnv snmp_conv [] = | |||
644 | }; | 644 | }; |
645 | 645 | ||
646 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, | 646 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, |
647 | unsigned int cls, | 647 | unsigned int cls, |
648 | unsigned short *syntax) | 648 | unsigned short *syntax) |
649 | { | 649 | { |
650 | struct snmp_cnv *cnv; | 650 | struct snmp_cnv *cnv; |
651 | 651 | ||
652 | cnv = snmp_conv; | 652 | cnv = snmp_conv; |
653 | 653 | ||
654 | while (cnv->syntax != -1) { | 654 | while (cnv->syntax != -1) { |
655 | if (cnv->tag == tag && cnv->class == cls) { | 655 | if (cnv->tag == tag && cnv->class == cls) { |
656 | *syntax = cnv->syntax; | 656 | *syntax = cnv->syntax; |
@@ -662,7 +662,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag, | |||
662 | } | 662 | } |
663 | 663 | ||
664 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | 664 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, |
665 | struct snmp_object **obj) | 665 | struct snmp_object **obj) |
666 | { | 666 | { |
667 | unsigned int cls, con, tag, len, idlen; | 667 | unsigned int cls, con, tag, len, idlen; |
668 | unsigned short type; | 668 | unsigned short type; |
@@ -670,41 +670,41 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
670 | unsigned long *lp, *id; | 670 | unsigned long *lp, *id; |
671 | unsigned long ul; | 671 | unsigned long ul; |
672 | long l; | 672 | long l; |
673 | 673 | ||
674 | *obj = NULL; | 674 | *obj = NULL; |
675 | id = NULL; | 675 | id = NULL; |
676 | 676 | ||
677 | if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) | 677 | if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) |
678 | return 0; | 678 | return 0; |
679 | 679 | ||
680 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | 680 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) |
681 | return 0; | 681 | return 0; |
682 | 682 | ||
683 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 683 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
684 | return 0; | 684 | return 0; |
685 | 685 | ||
686 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) | 686 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) |
687 | return 0; | 687 | return 0; |
688 | 688 | ||
689 | if (!asn1_oid_decode(ctx, end, &id, &idlen)) | 689 | if (!asn1_oid_decode(ctx, end, &id, &idlen)) |
690 | return 0; | 690 | return 0; |
691 | 691 | ||
692 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { | 692 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { |
693 | kfree(id); | 693 | kfree(id); |
694 | return 0; | 694 | return 0; |
695 | } | 695 | } |
696 | 696 | ||
697 | if (con != ASN1_PRI) { | 697 | if (con != ASN1_PRI) { |
698 | kfree(id); | 698 | kfree(id); |
699 | return 0; | 699 | return 0; |
700 | } | 700 | } |
701 | 701 | ||
702 | type = 0; | 702 | type = 0; |
703 | if (!snmp_tag_cls2syntax(tag, cls, &type)) { | 703 | if (!snmp_tag_cls2syntax(tag, cls, &type)) { |
704 | kfree(id); | 704 | kfree(id); |
705 | return 0; | 705 | return 0; |
706 | } | 706 | } |
707 | 707 | ||
708 | l = 0; | 708 | l = 0; |
709 | switch (type) { | 709 | switch (type) { |
710 | case SNMP_INTEGER: | 710 | case SNMP_INTEGER: |
@@ -714,7 +714,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
714 | return 0; | 714 | return 0; |
715 | } | 715 | } |
716 | *obj = kmalloc(sizeof(struct snmp_object) + len, | 716 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
717 | GFP_ATOMIC); | 717 | GFP_ATOMIC); |
718 | if (*obj == NULL) { | 718 | if (*obj == NULL) { |
719 | kfree(id); | 719 | kfree(id); |
720 | if (net_ratelimit()) | 720 | if (net_ratelimit()) |
@@ -730,7 +730,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
730 | return 0; | 730 | return 0; |
731 | } | 731 | } |
732 | *obj = kmalloc(sizeof(struct snmp_object) + len, | 732 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
733 | GFP_ATOMIC); | 733 | GFP_ATOMIC); |
734 | if (*obj == NULL) { | 734 | if (*obj == NULL) { |
735 | kfree(id); | 735 | kfree(id); |
736 | if (net_ratelimit()) | 736 | if (net_ratelimit()) |
@@ -818,12 +818,12 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
818 | kfree(id); | 818 | kfree(id); |
819 | return 0; | 819 | return 0; |
820 | } | 820 | } |
821 | 821 | ||
822 | (*obj)->syntax_len = len; | 822 | (*obj)->syntax_len = len; |
823 | (*obj)->type = type; | 823 | (*obj)->type = type; |
824 | (*obj)->id = id; | 824 | (*obj)->id = id; |
825 | (*obj)->id_len = idlen; | 825 | (*obj)->id_len = idlen; |
826 | 826 | ||
827 | if (!asn1_eoc_decode(ctx, eoc)) { | 827 | if (!asn1_eoc_decode(ctx, eoc)) { |
828 | kfree(id); | 828 | kfree(id); |
829 | kfree(*obj); | 829 | kfree(*obj); |
@@ -834,49 +834,49 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
834 | } | 834 | } |
835 | 835 | ||
836 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, | 836 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, |
837 | struct snmp_request *request) | 837 | struct snmp_request *request) |
838 | { | 838 | { |
839 | unsigned int cls, con, tag; | 839 | unsigned int cls, con, tag; |
840 | unsigned char *end; | 840 | unsigned char *end; |
841 | 841 | ||
842 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 842 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
843 | return 0; | 843 | return 0; |
844 | 844 | ||
845 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 845 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
846 | return 0; | 846 | return 0; |
847 | 847 | ||
848 | if (!asn1_ulong_decode(ctx, end, &request->id)) | 848 | if (!asn1_ulong_decode(ctx, end, &request->id)) |
849 | return 0; | 849 | return 0; |
850 | 850 | ||
851 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 851 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
852 | return 0; | 852 | return 0; |
853 | 853 | ||
854 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 854 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
855 | return 0; | 855 | return 0; |
856 | 856 | ||
857 | if (!asn1_uint_decode(ctx, end, &request->error_status)) | 857 | if (!asn1_uint_decode(ctx, end, &request->error_status)) |
858 | return 0; | 858 | return 0; |
859 | 859 | ||
860 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 860 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
861 | return 0; | 861 | return 0; |
862 | 862 | ||
863 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 863 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
864 | return 0; | 864 | return 0; |
865 | 865 | ||
866 | if (!asn1_uint_decode(ctx, end, &request->error_index)) | 866 | if (!asn1_uint_decode(ctx, end, &request->error_index)) |
867 | return 0; | 867 | return 0; |
868 | 868 | ||
869 | return 1; | 869 | return 1; |
870 | } | 870 | } |
871 | 871 | ||
872 | /* | 872 | /* |
873 | * Fast checksum update for possibly oddly-aligned UDP byte, from the | 873 | * Fast checksum update for possibly oddly-aligned UDP byte, from the |
874 | * code example in the draft. | 874 | * code example in the draft. |
875 | */ | 875 | */ |
876 | static void fast_csum(__sum16 *csum, | 876 | static void fast_csum(__sum16 *csum, |
877 | const unsigned char *optr, | 877 | const unsigned char *optr, |
878 | const unsigned char *nptr, | 878 | const unsigned char *nptr, |
879 | int offset) | 879 | int offset) |
880 | { | 880 | { |
881 | unsigned char s[4]; | 881 | unsigned char s[4]; |
882 | 882 | ||
@@ -893,30 +893,30 @@ static void fast_csum(__sum16 *csum, | |||
893 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); | 893 | *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); |
894 | } | 894 | } |
895 | 895 | ||
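
fast_csum() relies on the one's-complement identity from RFC 1624 (HC' = ~(~HC + ~m + m')), so only the 16-bit word that actually changed has to be revisited. A self-contained sketch, using made-up packet bytes, that checks the incremental update against a full recomputation:

#include <stdint.h>
#include <stdio.h>

/* Full 16-bit one's-complement checksum over an even-length buffer. */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	for (size_t i = 0; i < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Incremental update per RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m'). */
static uint16_t csum16_update(uint16_t check, uint16_t oldw, uint16_t neww)
{
	uint32_t sum = (uint16_t)~check;
	sum += (uint16_t)~oldw;
	sum += neww;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t pkt[8] = { 10, 1, 2, 3, 192, 168, 0, 1 };	/* example bytes only */
	uint16_t before = csum16(pkt, sizeof(pkt));

	uint16_t oldw = (uint16_t)((pkt[0] << 8) | pkt[1]);
	pkt[0] = 172;			/* rewrite the first octet, as mangle_address() does */
	uint16_t neww = (uint16_t)((pkt[0] << 8) | pkt[1]);

	/* both values come out identical (0x9151 for these bytes) */
	printf("incremental=0x%04x full=0x%04x\n",
	       csum16_update(before, oldw, neww), csum16(pkt, sizeof(pkt)));
	return 0;
}
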
896 | /* | 896 | /* |
897 | * Mangle IP address. | 897 | * Mangle IP address. |
898 | * - begin points to the start of the snmp message | 898 | * - begin points to the start of the snmp message |
899 | * - addr points to the start of the address | 899 | * - addr points to the start of the address |
900 | */ | 900 | */ |
901 | static inline void mangle_address(unsigned char *begin, | 901 | static inline void mangle_address(unsigned char *begin, |
902 | unsigned char *addr, | 902 | unsigned char *addr, |
903 | const struct oct1_map *map, | 903 | const struct oct1_map *map, |
904 | __sum16 *check) | 904 | __sum16 *check) |
905 | { | 905 | { |
906 | if (map->from == NOCT1(addr)) { | 906 | if (map->from == NOCT1(addr)) { |
907 | u_int32_t old; | 907 | u_int32_t old; |
908 | 908 | ||
909 | if (debug) | 909 | if (debug) |
910 | memcpy(&old, (unsigned char *)addr, sizeof(old)); | 910 | memcpy(&old, (unsigned char *)addr, sizeof(old)); |
911 | 911 | ||
912 | *addr = map->to; | 912 | *addr = map->to; |
913 | 913 | ||
914 | /* Update UDP checksum if being used */ | 914 | /* Update UDP checksum if being used */ |
915 | if (*check) { | 915 | if (*check) { |
916 | fast_csum(check, | 916 | fast_csum(check, |
917 | &map->from, &map->to, addr - begin); | 917 | &map->from, &map->to, addr - begin); |
918 | } | 918 | } |
919 | 919 | ||
920 | if (debug) | 920 | if (debug) |
921 | printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " | 921 | printk(KERN_DEBUG "bsalg: mapped %u.%u.%u.%u to " |
922 | "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); | 922 | "%u.%u.%u.%u\n", NIPQUAD(old), NIPQUAD(*addr)); |
@@ -924,66 +924,66 @@ static inline void mangle_address(unsigned char *begin, | |||
924 | } | 924 | } |
925 | 925 | ||
926 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, | 926 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, |
927 | struct snmp_v1_trap *trap, | 927 | struct snmp_v1_trap *trap, |
928 | const struct oct1_map *map, | 928 | const struct oct1_map *map, |
929 | __sum16 *check) | 929 | __sum16 *check) |
930 | { | 930 | { |
931 | unsigned int cls, con, tag, len; | 931 | unsigned int cls, con, tag, len; |
932 | unsigned char *end; | 932 | unsigned char *end; |
933 | 933 | ||
934 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 934 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
935 | return 0; | 935 | return 0; |
936 | 936 | ||
937 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) | 937 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_OJI) |
938 | return 0; | 938 | return 0; |
939 | 939 | ||
940 | if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) | 940 | if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) |
941 | return 0; | 941 | return 0; |
942 | 942 | ||
943 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 943 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
944 | goto err_id_free; | 944 | goto err_id_free; |
945 | 945 | ||
946 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || | 946 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_IPA) || |
947 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) | 947 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_OTS))) |
948 | goto err_id_free; | 948 | goto err_id_free; |
949 | 949 | ||
950 | if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) | 950 | if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) |
951 | goto err_id_free; | 951 | goto err_id_free; |
952 | 952 | ||
953 | /* IPv4 only */ | 953 | /* IPv4 only */ |
954 | if (len != 4) | 954 | if (len != 4) |
955 | goto err_addr_free; | 955 | goto err_addr_free; |
956 | 956 | ||
957 | mangle_address(ctx->begin, ctx->pointer - 4, map, check); | 957 | mangle_address(ctx->begin, ctx->pointer - 4, map, check); |
958 | 958 | ||
959 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 959 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
960 | goto err_addr_free; | 960 | goto err_addr_free; |
961 | 961 | ||
962 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 962 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
963 | goto err_addr_free; | 963 | goto err_addr_free; |
964 | 964 | ||
965 | if (!asn1_uint_decode(ctx, end, &trap->general)) | 965 | if (!asn1_uint_decode(ctx, end, &trap->general)) |
966 | goto err_addr_free; | 966 | goto err_addr_free; |
967 | 967 | ||
968 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 968 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
969 | goto err_addr_free; | 969 | goto err_addr_free; |
970 | 970 | ||
971 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) | 971 | if (cls != ASN1_UNI || con != ASN1_PRI || tag != ASN1_INT) |
972 | goto err_addr_free; | 972 | goto err_addr_free; |
973 | 973 | ||
974 | if (!asn1_uint_decode(ctx, end, &trap->specific)) | 974 | if (!asn1_uint_decode(ctx, end, &trap->specific)) |
975 | goto err_addr_free; | 975 | goto err_addr_free; |
976 | 976 | ||
977 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) | 977 | if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) |
978 | goto err_addr_free; | 978 | goto err_addr_free; |
979 | 979 | ||
980 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || | 980 | if (!((cls == ASN1_APL && con == ASN1_PRI && tag == SNMP_TIT) || |
981 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) | 981 | (cls == ASN1_UNI && con == ASN1_PRI && tag == ASN1_INT))) |
982 | goto err_addr_free; | 982 | goto err_addr_free; |
983 | 983 | ||
984 | if (!asn1_ulong_decode(ctx, end, &trap->time)) | 984 | if (!asn1_ulong_decode(ctx, end, &trap->time)) |
985 | goto err_addr_free; | 985 | goto err_addr_free; |
986 | 986 | ||
987 | return 1; | 987 | return 1; |
988 | 988 | ||
989 | err_addr_free: | 989 | err_addr_free: |
@@ -1004,7 +1004,7 @@ err_id_free: | |||
1004 | static void hex_dump(unsigned char *buf, size_t len) | 1004 | static void hex_dump(unsigned char *buf, size_t len) |
1005 | { | 1005 | { |
1006 | size_t i; | 1006 | size_t i; |
1007 | 1007 | ||
1008 | for (i = 0; i < len; i++) { | 1008 | for (i = 0; i < len; i++) { |
1009 | if (i && !(i % 16)) | 1009 | if (i && !(i % 16)) |
1010 | printk("\n"); | 1010 | printk("\n"); |
@@ -1018,30 +1018,30 @@ static void hex_dump(unsigned char *buf, size_t len) | |||
1018 | * (And this is the fucking 'basic' method). | 1018 | * (And this is the fucking 'basic' method). |
1019 | */ | 1019 | */ |
1020 | static int snmp_parse_mangle(unsigned char *msg, | 1020 | static int snmp_parse_mangle(unsigned char *msg, |
1021 | u_int16_t len, | 1021 | u_int16_t len, |
1022 | const struct oct1_map *map, | 1022 | const struct oct1_map *map, |
1023 | __sum16 *check) | 1023 | __sum16 *check) |
1024 | { | 1024 | { |
1025 | unsigned char *eoc, *end; | 1025 | unsigned char *eoc, *end; |
1026 | unsigned int cls, con, tag, vers, pdutype; | 1026 | unsigned int cls, con, tag, vers, pdutype; |
1027 | struct asn1_ctx ctx; | 1027 | struct asn1_ctx ctx; |
1028 | struct asn1_octstr comm; | 1028 | struct asn1_octstr comm; |
1029 | struct snmp_object **obj; | 1029 | struct snmp_object **obj; |
1030 | 1030 | ||
1031 | if (debug > 1) | 1031 | if (debug > 1) |
1032 | hex_dump(msg, len); | 1032 | hex_dump(msg, len); |
1033 | 1033 | ||
1034 | asn1_open(&ctx, msg, len); | 1034 | asn1_open(&ctx, msg, len); |
1035 | 1035 | ||
1036 | /* | 1036 | /* |
1037 | * Start of SNMP message. | 1037 | * Start of SNMP message. |
1038 | */ | 1038 | */ |
1039 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) | 1039 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) |
1040 | return 0; | 1040 | return 0; |
1041 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | 1041 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) |
1042 | return 0; | 1042 | return 0; |
1043 | 1043 | ||
1044 | /* | 1044 | /* |
1045 | * Version 1 or 2 handled. | 1045 | * Version 1 or 2 handled. |
1046 | */ | 1046 | */ |
1047 | if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) | 1047 | if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) |
@@ -1054,7 +1054,7 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1054 | printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); | 1054 | printk(KERN_DEBUG "bsalg: snmp version: %u\n", vers + 1); |
1055 | if (vers > 1) | 1055 | if (vers > 1) |
1056 | return 1; | 1056 | return 1; |
1057 | 1057 | ||
1058 | /* | 1058 | /* |
1059 | * Community. | 1059 | * Community. |
1060 | */ | 1060 | */ |
@@ -1066,14 +1066,14 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1066 | return 0; | 1066 | return 0; |
1067 | if (debug > 1) { | 1067 | if (debug > 1) { |
1068 | unsigned int i; | 1068 | unsigned int i; |
1069 | 1069 | ||
1070 | printk(KERN_DEBUG "bsalg: community: "); | 1070 | printk(KERN_DEBUG "bsalg: community: "); |
1071 | for (i = 0; i < comm.len; i++) | 1071 | for (i = 0; i < comm.len; i++) |
1072 | printk("%c", comm.data[i]); | 1072 | printk("%c", comm.data[i]); |
1073 | printk("\n"); | 1073 | printk("\n"); |
1074 | } | 1074 | } |
1075 | kfree(comm.data); | 1075 | kfree(comm.data); |
1076 | 1076 | ||
1077 | /* | 1077 | /* |
1078 | * PDU type | 1078 | * PDU type |
1079 | */ | 1079 | */ |
@@ -1092,7 +1092,7 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1092 | [SNMP_PDU_INFORM] = "inform", | 1092 | [SNMP_PDU_INFORM] = "inform", |
1093 | [SNMP_PDU_TRAP2] = "trapv2" | 1093 | [SNMP_PDU_TRAP2] = "trapv2" |
1094 | }; | 1094 | }; |
1095 | 1095 | ||
1096 | if (pdutype > SNMP_PDU_TRAP2) | 1096 | if (pdutype > SNMP_PDU_TRAP2) |
1097 | printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); | 1097 | printk(KERN_DEBUG "bsalg: bad pdu type %u\n", pdutype); |
1098 | else | 1098 | else |
@@ -1101,56 +1101,56 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1101 | if (pdutype != SNMP_PDU_RESPONSE && | 1101 | if (pdutype != SNMP_PDU_RESPONSE && |
1102 | pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) | 1102 | pdutype != SNMP_PDU_TRAP1 && pdutype != SNMP_PDU_TRAP2) |
1103 | return 1; | 1103 | return 1; |
1104 | 1104 | ||
1105 | /* | 1105 | /* |
1106 | * Request header or v1 trap | 1106 | * Request header or v1 trap |
1107 | */ | 1107 | */ |
1108 | if (pdutype == SNMP_PDU_TRAP1) { | 1108 | if (pdutype == SNMP_PDU_TRAP1) { |
1109 | struct snmp_v1_trap trap; | 1109 | struct snmp_v1_trap trap; |
1110 | unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); | 1110 | unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); |
1111 | 1111 | ||
1112 | if (ret) { | 1112 | if (ret) { |
1113 | kfree(trap.id); | 1113 | kfree(trap.id); |
1114 | kfree((unsigned long *)trap.ip_address); | 1114 | kfree((unsigned long *)trap.ip_address); |
1115 | } else | 1115 | } else |
1116 | return ret; | 1116 | return ret; |
1117 | 1117 | ||
1118 | } else { | 1118 | } else { |
1119 | struct snmp_request req; | 1119 | struct snmp_request req; |
1120 | 1120 | ||
1121 | if (!snmp_request_decode(&ctx, &req)) | 1121 | if (!snmp_request_decode(&ctx, &req)) |
1122 | return 0; | 1122 | return 0; |
1123 | 1123 | ||
1124 | if (debug > 1) | 1124 | if (debug > 1) |
1125 | printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " | 1125 | printk(KERN_DEBUG "bsalg: request: id=0x%lx error_status=%u " |
1126 | "error_index=%u\n", req.id, req.error_status, | 1126 | "error_index=%u\n", req.id, req.error_status, |
1127 | req.error_index); | 1127 | req.error_index); |
1128 | } | 1128 | } |
1129 | 1129 | ||
1130 | /* | 1130 | /* |
1131 | * Loop through objects, look for IP addresses to mangle. | 1131 | * Loop through objects, look for IP addresses to mangle. |
1132 | */ | 1132 | */ |
1133 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) | 1133 | if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) |
1134 | return 0; | 1134 | return 0; |
1135 | 1135 | ||
1136 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) | 1136 | if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) |
1137 | return 0; | 1137 | return 0; |
1138 | 1138 | ||
1139 | obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); | 1139 | obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); |
1140 | if (obj == NULL) { | 1140 | if (obj == NULL) { |
1141 | if (net_ratelimit()) | 1141 | if (net_ratelimit()) |
1142 | printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__); | 1142 | printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__); |
1143 | return 0; | 1143 | return 0; |
1144 | } | 1144 | } |
1145 | 1145 | ||
1146 | while (!asn1_eoc_decode(&ctx, eoc)) { | 1146 | while (!asn1_eoc_decode(&ctx, eoc)) { |
1147 | unsigned int i; | 1147 | unsigned int i; |
1148 | 1148 | ||
1149 | if (!snmp_object_decode(&ctx, obj)) { | 1149 | if (!snmp_object_decode(&ctx, obj)) { |
1150 | if (*obj) { | 1150 | if (*obj) { |
1151 | kfree((*obj)->id); | 1151 | kfree((*obj)->id); |
1152 | kfree(*obj); | 1152 | kfree(*obj); |
1153 | } | 1153 | } |
1154 | kfree(obj); | 1154 | kfree(obj); |
1155 | return 0; | 1155 | return 0; |
1156 | } | 1156 | } |
@@ -1163,20 +1163,20 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1163 | printk("%lu", (*obj)->id[i]); | 1163 | printk("%lu", (*obj)->id[i]); |
1164 | } | 1164 | } |
1165 | printk(": type=%u\n", (*obj)->type); | 1165 | printk(": type=%u\n", (*obj)->type); |
1166 | 1166 | ||
1167 | } | 1167 | } |
1168 | 1168 | ||
1169 | if ((*obj)->type == SNMP_IPADDR) | 1169 | if ((*obj)->type == SNMP_IPADDR) |
1170 | mangle_address(ctx.begin, ctx.pointer - 4 , map, check); | 1170 | mangle_address(ctx.begin, ctx.pointer - 4 , map, check); |
1171 | 1171 | ||
1172 | kfree((*obj)->id); | 1172 | kfree((*obj)->id); |
1173 | kfree(*obj); | 1173 | kfree(*obj); |
1174 | } | 1174 | } |
1175 | kfree(obj); | 1175 | kfree(obj); |
1176 | 1176 | ||
1177 | if (!asn1_eoc_decode(&ctx, eoc)) | 1177 | if (!asn1_eoc_decode(&ctx, eoc)) |
1178 | return 0; | 1178 | return 0; |
1179 | 1179 | ||
1180 | return 1; | 1180 | return 1; |
1181 | } | 1181 | } |
1182 | 1182 | ||
@@ -1186,12 +1186,12 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1186 | * | 1186 | * |
1187 | *****************************************************************************/ | 1187 | *****************************************************************************/ |
1188 | 1188 | ||
1189 | /* | 1189 | /* |
1190 | * SNMP translation routine. | 1190 | * SNMP translation routine. |
1191 | */ | 1191 | */ |
1192 | static int snmp_translate(struct ip_conntrack *ct, | 1192 | static int snmp_translate(struct ip_conntrack *ct, |
1193 | enum ip_conntrack_info ctinfo, | 1193 | enum ip_conntrack_info ctinfo, |
1194 | struct sk_buff **pskb) | 1194 | struct sk_buff **pskb) |
1195 | { | 1195 | { |
1196 | struct iphdr *iph = (*pskb)->nh.iph; | 1196 | struct iphdr *iph = (*pskb)->nh.iph; |
1197 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); | 1197 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); |
@@ -1213,12 +1213,12 @@ static int snmp_translate(struct ip_conntrack *ct, | |||
1213 | map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); | 1213 | map.from = NOCT1(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip); |
1214 | map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip); | 1214 | map.to = NOCT1(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip); |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | if (map.from == map.to) | 1217 | if (map.from == map.to) |
1218 | return NF_ACCEPT; | 1218 | return NF_ACCEPT; |
1219 | 1219 | ||
1220 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), | 1220 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), |
1221 | paylen, &map, &udph->check)) { | 1221 | paylen, &map, &udph->check)) { |
1222 | if (net_ratelimit()) | 1222 | if (net_ratelimit()) |
1223 | printk(KERN_WARNING "bsalg: parser failed\n"); | 1223 | printk(KERN_WARNING "bsalg: parser failed\n"); |
1224 | return NF_DROP; | 1224 | return NF_DROP; |
@@ -1247,7 +1247,7 @@ static int help(struct sk_buff **pskb, | |||
1247 | if (!(ct->status & IPS_NAT_MASK)) | 1247 | if (!(ct->status & IPS_NAT_MASK)) |
1248 | return NF_ACCEPT; | 1248 | return NF_ACCEPT; |
1249 | 1249 | ||
1250 | /* | 1250 | /* |
1251 | * Make sure the packet length is ok. So far, we were only guaranteed | 1251 | * Make sure the packet length is ok. So far, we were only guaranteed |
1252 | * to have a valid length IP header plus 8 bytes, which means we have | 1252 | * to have a valid length IP header plus 8 bytes, which means we have |
1253 | * enough room for a UDP header. Just verify the UDP length field so we | 1253 | * enough room for a UDP header. Just verify the UDP length field so we |
@@ -1305,7 +1305,7 @@ static struct ip_conntrack_helper snmp_trap_helper = { | |||
1305 | * Module stuff. | 1305 | * Module stuff. |
1306 | * | 1306 | * |
1307 | *****************************************************************************/ | 1307 | *****************************************************************************/ |
1308 | 1308 | ||
1309 | static int __init ip_nat_snmp_basic_init(void) | 1309 | static int __init ip_nat_snmp_basic_init(void) |
1310 | { | 1310 | { |
1311 | int ret = 0; | 1311 | int ret = 0; |
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index ad66328baa5d..adf25f9f70e1 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c | |||
@@ -81,7 +81,7 @@ static void nat_decode_session(struct sk_buff *skb, struct flowi *fl) | |||
81 | } | 81 | } |
82 | } | 82 | } |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | static unsigned int | 85 | static unsigned int |
86 | ip_nat_fn(unsigned int hooknum, | 86 | ip_nat_fn(unsigned int hooknum, |
87 | struct sk_buff **pskb, | 87 | struct sk_buff **pskb, |
@@ -107,8 +107,8 @@ ip_nat_fn(unsigned int hooknum, | |||
107 | protocol. 8) --RR */ | 107 | protocol. 8) --RR */ |
108 | if (!ct) { | 108 | if (!ct) { |
109 | /* Exception: ICMP redirect to new connection (not in | 109 | /* Exception: ICMP redirect to new connection (not in |
110 | hash table yet). We must not let this through, in | 110 | hash table yet). We must not let this through, in |
111 | case we're doing NAT to the same network. */ | 111 | case we're doing NAT to the same network. */ |
112 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { | 112 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { |
113 | struct icmphdr _hdr, *hp; | 113 | struct icmphdr _hdr, *hp; |
114 | 114 | ||
@@ -148,7 +148,7 @@ ip_nat_fn(unsigned int hooknum, | |||
148 | if (unlikely(is_confirmed(ct))) | 148 | if (unlikely(is_confirmed(ct))) |
149 | /* NAT module was loaded late */ | 149 | /* NAT module was loaded late */ |
150 | ret = alloc_null_binding_confirmed(ct, info, | 150 | ret = alloc_null_binding_confirmed(ct, info, |
151 | hooknum); | 151 | hooknum); |
152 | else if (hooknum == NF_IP_LOCAL_IN) | 152 | else if (hooknum == NF_IP_LOCAL_IN) |
153 | /* LOCAL_IN hook doesn't have a chain! */ | 153 | /* LOCAL_IN hook doesn't have a chain! */ |
154 | ret = alloc_null_binding(ct, info, hooknum); | 154 | ret = alloc_null_binding(ct, info, hooknum); |
@@ -179,10 +179,10 @@ ip_nat_fn(unsigned int hooknum, | |||
179 | 179 | ||
180 | static unsigned int | 180 | static unsigned int |
181 | ip_nat_in(unsigned int hooknum, | 181 | ip_nat_in(unsigned int hooknum, |
182 | struct sk_buff **pskb, | 182 | struct sk_buff **pskb, |
183 | const struct net_device *in, | 183 | const struct net_device *in, |
184 | const struct net_device *out, | 184 | const struct net_device *out, |
185 | int (*okfn)(struct sk_buff *)) | 185 | int (*okfn)(struct sk_buff *)) |
186 | { | 186 | { |
187 | unsigned int ret; | 187 | unsigned int ret; |
188 | __be32 daddr = (*pskb)->nh.iph->daddr; | 188 | __be32 daddr = (*pskb)->nh.iph->daddr; |
@@ -277,9 +277,9 @@ ip_nat_adjust(unsigned int hooknum, | |||
277 | 277 | ||
278 | ct = ip_conntrack_get(*pskb, &ctinfo); | 278 | ct = ip_conntrack_get(*pskb, &ctinfo); |
279 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | 279 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { |
280 | DEBUGP("ip_nat_standalone: adjusting sequence number\n"); | 280 | DEBUGP("ip_nat_standalone: adjusting sequence number\n"); |
281 | if (!ip_nat_seq_adjust(pskb, ct, ctinfo)) | 281 | if (!ip_nat_seq_adjust(pskb, ct, ctinfo)) |
282 | return NF_DROP; | 282 | return NF_DROP; |
283 | } | 283 | } |
284 | return NF_ACCEPT; | 284 | return NF_ACCEPT; |
285 | } | 285 | } |
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index cd520df4dcf4..68bf19f3b01c 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c | |||
@@ -11,13 +11,13 @@ | |||
11 | * | 11 | * |
12 | * 2000-03-27: Simplified code (thanks to Andi Kleen for clues). | 12 | * 2000-03-27: Simplified code (thanks to Andi Kleen for clues). |
13 | * 2000-05-20: Fixed notifier problems (following Miguel Freitas' report). | 13 | * 2000-05-20: Fixed notifier problems (following Miguel Freitas' report). |
14 | * 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian | 14 | * 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian |
15 | * Zander). | 15 | * Zander). |
16 | * 2000-08-01: Added Nick Williams' MAC support. | 16 | * 2000-08-01: Added Nick Williams' MAC support. |
17 | * 2002-06-25: Code cleanup. | 17 | * 2002-06-25: Code cleanup. |
18 | * 2005-01-10: Added /proc counter for dropped packets; fixed so | 18 | * 2005-01-10: Added /proc counter for dropped packets; fixed so |
19 | * packets aren't delivered to user space if they're going | 19 | * packets aren't delivered to user space if they're going |
20 | * to be dropped. | 20 | * to be dropped. |
21 | * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte) | 21 | * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte) |
22 | * | 22 | * |
23 | */ | 23 | */ |
@@ -97,7 +97,7 @@ __ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data) | |||
97 | 97 | ||
98 | list_for_each_prev(p, &queue_list) { | 98 | list_for_each_prev(p, &queue_list) { |
99 | struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p; | 99 | struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p; |
100 | 100 | ||
101 | if (!cmpfn || cmpfn(entry, data)) | 101 | if (!cmpfn || cmpfn(entry, data)) |
102 | return entry; | 102 | return entry; |
103 | } | 103 | } |
@@ -129,7 +129,7 @@ static inline void | |||
129 | __ipq_flush(int verdict) | 129 | __ipq_flush(int verdict) |
130 | { | 130 | { |
131 | struct ipq_queue_entry *entry; | 131 | struct ipq_queue_entry *entry; |
132 | 132 | ||
133 | while ((entry = __ipq_find_dequeue_entry(NULL, 0))) | 133 | while ((entry = __ipq_find_dequeue_entry(NULL, 0))) |
134 | ipq_issue_verdict(entry, verdict); | 134 | ipq_issue_verdict(entry, verdict); |
135 | } | 135 | } |
@@ -138,21 +138,21 @@ static inline int | |||
138 | __ipq_set_mode(unsigned char mode, unsigned int range) | 138 | __ipq_set_mode(unsigned char mode, unsigned int range) |
139 | { | 139 | { |
140 | int status = 0; | 140 | int status = 0; |
141 | 141 | ||
142 | switch(mode) { | 142 | switch(mode) { |
143 | case IPQ_COPY_NONE: | 143 | case IPQ_COPY_NONE: |
144 | case IPQ_COPY_META: | 144 | case IPQ_COPY_META: |
145 | copy_mode = mode; | 145 | copy_mode = mode; |
146 | copy_range = 0; | 146 | copy_range = 0; |
147 | break; | 147 | break; |
148 | 148 | ||
149 | case IPQ_COPY_PACKET: | 149 | case IPQ_COPY_PACKET: |
150 | copy_mode = mode; | 150 | copy_mode = mode; |
151 | copy_range = range; | 151 | copy_range = range; |
152 | if (copy_range > 0xFFFF) | 152 | if (copy_range > 0xFFFF) |
153 | copy_range = 0xFFFF; | 153 | copy_range = 0xFFFF; |
154 | break; | 154 | break; |
155 | 155 | ||
156 | default: | 156 | default: |
157 | status = -EINVAL; | 157 | status = -EINVAL; |
158 | 158 | ||
@@ -173,7 +173,7 @@ static struct ipq_queue_entry * | |||
173 | ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data) | 173 | ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data) |
174 | { | 174 | { |
175 | struct ipq_queue_entry *entry; | 175 | struct ipq_queue_entry *entry; |
176 | 176 | ||
177 | write_lock_bh(&queue_lock); | 177 | write_lock_bh(&queue_lock); |
178 | entry = __ipq_find_dequeue_entry(cmpfn, data); | 178 | entry = __ipq_find_dequeue_entry(cmpfn, data); |
179 | write_unlock_bh(&queue_lock); | 179 | write_unlock_bh(&queue_lock); |
@@ -199,14 +199,14 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) | |||
199 | struct nlmsghdr *nlh; | 199 | struct nlmsghdr *nlh; |
200 | 200 | ||
201 | read_lock_bh(&queue_lock); | 201 | read_lock_bh(&queue_lock); |
202 | 202 | ||
203 | switch (copy_mode) { | 203 | switch (copy_mode) { |
204 | case IPQ_COPY_META: | 204 | case IPQ_COPY_META: |
205 | case IPQ_COPY_NONE: | 205 | case IPQ_COPY_NONE: |
206 | size = NLMSG_SPACE(sizeof(*pmsg)); | 206 | size = NLMSG_SPACE(sizeof(*pmsg)); |
207 | data_len = 0; | 207 | data_len = 0; |
208 | break; | 208 | break; |
209 | 209 | ||
210 | case IPQ_COPY_PACKET: | 210 | case IPQ_COPY_PACKET: |
211 | if ((entry->skb->ip_summed == CHECKSUM_PARTIAL || | 211 | if ((entry->skb->ip_summed == CHECKSUM_PARTIAL || |
212 | entry->skb->ip_summed == CHECKSUM_COMPLETE) && | 212 | entry->skb->ip_summed == CHECKSUM_COMPLETE) && |
@@ -218,10 +218,10 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) | |||
218 | data_len = entry->skb->len; | 218 | data_len = entry->skb->len; |
219 | else | 219 | else |
220 | data_len = copy_range; | 220 | data_len = copy_range; |
221 | 221 | ||
222 | size = NLMSG_SPACE(sizeof(*pmsg) + data_len); | 222 | size = NLMSG_SPACE(sizeof(*pmsg) + data_len); |
223 | break; | 223 | break; |
224 | 224 | ||
225 | default: | 225 | default: |
226 | *errp = -EINVAL; | 226 | *errp = -EINVAL; |
227 | read_unlock_bh(&queue_lock); | 227 | read_unlock_bh(&queue_lock); |
@@ -233,7 +233,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) | |||
233 | skb = alloc_skb(size, GFP_ATOMIC); | 233 | skb = alloc_skb(size, GFP_ATOMIC); |
234 | if (!skb) | 234 | if (!skb) |
235 | goto nlmsg_failure; | 235 | goto nlmsg_failure; |
236 | 236 | ||
237 | old_tail= skb->tail; | 237 | old_tail= skb->tail; |
238 | nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); | 238 | nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); |
239 | pmsg = NLMSG_DATA(nlh); | 239 | pmsg = NLMSG_DATA(nlh); |
@@ -246,29 +246,29 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) | |||
246 | pmsg->mark = entry->skb->mark; | 246 | pmsg->mark = entry->skb->mark; |
247 | pmsg->hook = entry->info->hook; | 247 | pmsg->hook = entry->info->hook; |
248 | pmsg->hw_protocol = entry->skb->protocol; | 248 | pmsg->hw_protocol = entry->skb->protocol; |
249 | 249 | ||
250 | if (entry->info->indev) | 250 | if (entry->info->indev) |
251 | strcpy(pmsg->indev_name, entry->info->indev->name); | 251 | strcpy(pmsg->indev_name, entry->info->indev->name); |
252 | else | 252 | else |
253 | pmsg->indev_name[0] = '\0'; | 253 | pmsg->indev_name[0] = '\0'; |
254 | 254 | ||
255 | if (entry->info->outdev) | 255 | if (entry->info->outdev) |
256 | strcpy(pmsg->outdev_name, entry->info->outdev->name); | 256 | strcpy(pmsg->outdev_name, entry->info->outdev->name); |
257 | else | 257 | else |
258 | pmsg->outdev_name[0] = '\0'; | 258 | pmsg->outdev_name[0] = '\0'; |
259 | 259 | ||
260 | if (entry->info->indev && entry->skb->dev) { | 260 | if (entry->info->indev && entry->skb->dev) { |
261 | pmsg->hw_type = entry->skb->dev->type; | 261 | pmsg->hw_type = entry->skb->dev->type; |
262 | if (entry->skb->dev->hard_header_parse) | 262 | if (entry->skb->dev->hard_header_parse) |
263 | pmsg->hw_addrlen = | 263 | pmsg->hw_addrlen = |
264 | entry->skb->dev->hard_header_parse(entry->skb, | 264 | entry->skb->dev->hard_header_parse(entry->skb, |
265 | pmsg->hw_addr); | 265 | pmsg->hw_addr); |
266 | } | 266 | } |
267 | 267 | ||
268 | if (data_len) | 268 | if (data_len) |
269 | if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len)) | 269 | if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len)) |
270 | BUG(); | 270 | BUG(); |
271 | 271 | ||
272 | nlh->nlmsg_len = skb->tail - old_tail; | 272 | nlh->nlmsg_len = skb->tail - old_tail; |
273 | return skb; | 273 | return skb; |
274 | 274 | ||
@@ -303,26 +303,26 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | |||
303 | nskb = ipq_build_packet_message(entry, &status); | 303 | nskb = ipq_build_packet_message(entry, &status); |
304 | if (nskb == NULL) | 304 | if (nskb == NULL) |
305 | goto err_out_free; | 305 | goto err_out_free; |
306 | 306 | ||
307 | write_lock_bh(&queue_lock); | 307 | write_lock_bh(&queue_lock); |
308 | 308 | ||
309 | if (!peer_pid) | 309 | if (!peer_pid) |
310 | goto err_out_free_nskb; | 310 | goto err_out_free_nskb; |
311 | 311 | ||
312 | if (queue_total >= queue_maxlen) { | 312 | if (queue_total >= queue_maxlen) { |
313 | queue_dropped++; | 313 | queue_dropped++; |
314 | status = -ENOSPC; | 314 | status = -ENOSPC; |
315 | if (net_ratelimit()) | 315 | if (net_ratelimit()) |
316 | printk (KERN_WARNING "ip_queue: full at %d entries, " | 316 | printk (KERN_WARNING "ip_queue: full at %d entries, " |
317 | "dropping packets(s). Dropped: %d\n", queue_total, | 317 | "dropping packets(s). Dropped: %d\n", queue_total, |
318 | queue_dropped); | 318 | queue_dropped); |
319 | goto err_out_free_nskb; | 319 | goto err_out_free_nskb; |
320 | } | 320 | } |
321 | 321 | ||
322 | /* netlink_unicast will either free the nskb or attach it to a socket */ | 322 | /* netlink_unicast will either free the nskb or attach it to a socket */ |
323 | status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT); | 323 | status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT); |
324 | if (status < 0) { | 324 | if (status < 0) { |
325 | queue_user_dropped++; | 325 | queue_user_dropped++; |
326 | goto err_out_unlock; | 326 | goto err_out_unlock; |
327 | } | 327 | } |
328 | 328 | ||
@@ -332,8 +332,8 @@ ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, | |||
332 | return status; | 332 | return status; |
333 | 333 | ||
334 | err_out_free_nskb: | 334 | err_out_free_nskb: |
335 | kfree_skb(nskb); | 335 | kfree_skb(nskb); |
336 | 336 | ||
337 | err_out_unlock: | 337 | err_out_unlock: |
338 | write_unlock_bh(&queue_lock); | 338 | write_unlock_bh(&queue_lock); |
339 | 339 | ||
@@ -359,11 +359,11 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) | |||
359 | return -EINVAL; | 359 | return -EINVAL; |
360 | if (diff > skb_tailroom(e->skb)) { | 360 | if (diff > skb_tailroom(e->skb)) { |
361 | struct sk_buff *newskb; | 361 | struct sk_buff *newskb; |
362 | 362 | ||
363 | newskb = skb_copy_expand(e->skb, | 363 | newskb = skb_copy_expand(e->skb, |
364 | skb_headroom(e->skb), | 364 | skb_headroom(e->skb), |
365 | diff, | 365 | diff, |
366 | GFP_ATOMIC); | 366 | GFP_ATOMIC); |
367 | if (newskb == NULL) { | 367 | if (newskb == NULL) { |
368 | printk(KERN_WARNING "ip_queue: OOM " | 368 | printk(KERN_WARNING "ip_queue: OOM " |
369 | "in mangle, dropping packet\n"); | 369 | "in mangle, dropping packet\n"); |
@@ -403,11 +403,11 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) | |||
403 | return -ENOENT; | 403 | return -ENOENT; |
404 | else { | 404 | else { |
405 | int verdict = vmsg->value; | 405 | int verdict = vmsg->value; |
406 | 406 | ||
407 | if (vmsg->data_len && vmsg->data_len == len) | 407 | if (vmsg->data_len && vmsg->data_len == len) |
408 | if (ipq_mangle_ipv4(vmsg, entry) < 0) | 408 | if (ipq_mangle_ipv4(vmsg, entry) < 0) |
409 | verdict = NF_DROP; | 409 | verdict = NF_DROP; |
410 | 410 | ||
411 | ipq_issue_verdict(entry, verdict); | 411 | ipq_issue_verdict(entry, verdict); |
412 | return 0; | 412 | return 0; |
413 | } | 413 | } |
@@ -426,7 +426,7 @@ ipq_set_mode(unsigned char mode, unsigned int range) | |||
426 | 426 | ||
427 | static int | 427 | static int |
428 | ipq_receive_peer(struct ipq_peer_msg *pmsg, | 428 | ipq_receive_peer(struct ipq_peer_msg *pmsg, |
429 | unsigned char type, unsigned int len) | 429 | unsigned char type, unsigned int len) |
430 | { | 430 | { |
431 | int status = 0; | 431 | int status = 0; |
432 | 432 | ||
@@ -436,15 +436,15 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg, | |||
436 | switch (type) { | 436 | switch (type) { |
437 | case IPQM_MODE: | 437 | case IPQM_MODE: |
438 | status = ipq_set_mode(pmsg->msg.mode.value, | 438 | status = ipq_set_mode(pmsg->msg.mode.value, |
439 | pmsg->msg.mode.range); | 439 | pmsg->msg.mode.range); |
440 | break; | 440 | break; |
441 | 441 | ||
442 | case IPQM_VERDICT: | 442 | case IPQM_VERDICT: |
443 | if (pmsg->msg.verdict.value > NF_MAX_VERDICT) | 443 | if (pmsg->msg.verdict.value > NF_MAX_VERDICT) |
444 | status = -EINVAL; | 444 | status = -EINVAL; |
445 | else | 445 | else |
446 | status = ipq_set_verdict(&pmsg->msg.verdict, | 446 | status = ipq_set_verdict(&pmsg->msg.verdict, |
447 | len - sizeof(*pmsg)); | 447 | len - sizeof(*pmsg)); |
448 | break; | 448 | break; |
449 | default: | 449 | default: |
450 | status = -EINVAL; | 450 | status = -EINVAL; |
@@ -468,7 +468,7 @@ dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex) | |||
468 | return 1; | 468 | return 1; |
469 | if (entry->skb->nf_bridge->physoutdev && | 469 | if (entry->skb->nf_bridge->physoutdev && |
470 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) | 470 | entry->skb->nf_bridge->physoutdev->ifindex == ifindex) |
471 | return 1; | 471 | return 1; |
472 | } | 472 | } |
473 | #endif | 473 | #endif |
474 | return 0; | 474 | return 0; |
@@ -478,7 +478,7 @@ static void | |||
478 | ipq_dev_drop(int ifindex) | 478 | ipq_dev_drop(int ifindex) |
479 | { | 479 | { |
480 | struct ipq_queue_entry *entry; | 480 | struct ipq_queue_entry *entry; |
481 | 481 | ||
482 | while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL) | 482 | while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL) |
483 | ipq_issue_verdict(entry, NF_DROP); | 483 | ipq_issue_verdict(entry, NF_DROP); |
484 | } | 484 | } |
@@ -502,25 +502,25 @@ ipq_rcv_skb(struct sk_buff *skb) | |||
502 | 502 | ||
503 | pid = nlh->nlmsg_pid; | 503 | pid = nlh->nlmsg_pid; |
504 | flags = nlh->nlmsg_flags; | 504 | flags = nlh->nlmsg_flags; |
505 | 505 | ||
506 | if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI) | 506 | if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI) |
507 | RCV_SKB_FAIL(-EINVAL); | 507 | RCV_SKB_FAIL(-EINVAL); |
508 | 508 | ||
509 | if (flags & MSG_TRUNC) | 509 | if (flags & MSG_TRUNC) |
510 | RCV_SKB_FAIL(-ECOMM); | 510 | RCV_SKB_FAIL(-ECOMM); |
511 | 511 | ||
512 | type = nlh->nlmsg_type; | 512 | type = nlh->nlmsg_type; |
513 | if (type < NLMSG_NOOP || type >= IPQM_MAX) | 513 | if (type < NLMSG_NOOP || type >= IPQM_MAX) |
514 | RCV_SKB_FAIL(-EINVAL); | 514 | RCV_SKB_FAIL(-EINVAL); |
515 | 515 | ||
516 | if (type <= IPQM_BASE) | 516 | if (type <= IPQM_BASE) |
517 | return; | 517 | return; |
518 | 518 | ||
519 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) | 519 | if (security_netlink_recv(skb, CAP_NET_ADMIN)) |
520 | RCV_SKB_FAIL(-EPERM); | 520 | RCV_SKB_FAIL(-EPERM); |
521 | 521 | ||
522 | write_lock_bh(&queue_lock); | 522 | write_lock_bh(&queue_lock); |
523 | 523 | ||
524 | if (peer_pid) { | 524 | if (peer_pid) { |
525 | if (peer_pid != pid) { | 525 | if (peer_pid != pid) { |
526 | write_unlock_bh(&queue_lock); | 526 | write_unlock_bh(&queue_lock); |
@@ -530,17 +530,17 @@ ipq_rcv_skb(struct sk_buff *skb) | |||
530 | net_enable_timestamp(); | 530 | net_enable_timestamp(); |
531 | peer_pid = pid; | 531 | peer_pid = pid; |
532 | } | 532 | } |
533 | 533 | ||
534 | write_unlock_bh(&queue_lock); | 534 | write_unlock_bh(&queue_lock); |
535 | 535 | ||
536 | status = ipq_receive_peer(NLMSG_DATA(nlh), type, | 536 | status = ipq_receive_peer(NLMSG_DATA(nlh), type, |
537 | nlmsglen - NLMSG_LENGTH(0)); | 537 | nlmsglen - NLMSG_LENGTH(0)); |
538 | if (status < 0) | 538 | if (status < 0) |
539 | RCV_SKB_FAIL(status); | 539 | RCV_SKB_FAIL(status); |
540 | 540 | ||
541 | if (flags & NLM_F_ACK) | 541 | if (flags & NLM_F_ACK) |
542 | netlink_ack(skb, nlh, 0); | 542 | netlink_ack(skb, nlh, 0); |
543 | return; | 543 | return; |
544 | } | 544 | } |
545 | 545 | ||
546 | static void | 546 | static void |
@@ -550,19 +550,19 @@ ipq_rcv_sk(struct sock *sk, int len) | |||
550 | unsigned int qlen; | 550 | unsigned int qlen; |
551 | 551 | ||
552 | mutex_lock(&ipqnl_mutex); | 552 | mutex_lock(&ipqnl_mutex); |
553 | 553 | ||
554 | for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { | 554 | for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { |
555 | skb = skb_dequeue(&sk->sk_receive_queue); | 555 | skb = skb_dequeue(&sk->sk_receive_queue); |
556 | ipq_rcv_skb(skb); | 556 | ipq_rcv_skb(skb); |
557 | kfree_skb(skb); | 557 | kfree_skb(skb); |
558 | } | 558 | } |
559 | 559 | ||
560 | mutex_unlock(&ipqnl_mutex); | 560 | mutex_unlock(&ipqnl_mutex); |
561 | } | 561 | } |
562 | 562 | ||
563 | static int | 563 | static int |
564 | ipq_rcv_dev_event(struct notifier_block *this, | 564 | ipq_rcv_dev_event(struct notifier_block *this, |
565 | unsigned long event, void *ptr) | 565 | unsigned long event, void *ptr) |
566 | { | 566 | { |
567 | struct net_device *dev = ptr; | 567 | struct net_device *dev = ptr; |
568 | 568 | ||
@@ -578,7 +578,7 @@ static struct notifier_block ipq_dev_notifier = { | |||
578 | 578 | ||
579 | static int | 579 | static int |
580 | ipq_rcv_nl_event(struct notifier_block *this, | 580 | ipq_rcv_nl_event(struct notifier_block *this, |
581 | unsigned long event, void *ptr) | 581 | unsigned long event, void *ptr) |
582 | { | 582 | { |
583 | struct netlink_notify *n = ptr; | 583 | struct netlink_notify *n = ptr; |
584 | 584 | ||
@@ -607,7 +607,7 @@ static ctl_table ipq_table[] = { | |||
607 | .mode = 0644, | 607 | .mode = 0644, |
608 | .proc_handler = proc_dointvec | 608 | .proc_handler = proc_dointvec |
609 | }, | 609 | }, |
610 | { .ctl_name = 0 } | 610 | { .ctl_name = 0 } |
611 | }; | 611 | }; |
612 | 612 | ||
613 | static ctl_table ipq_dir_table[] = { | 613 | static ctl_table ipq_dir_table[] = { |
@@ -637,25 +637,25 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length) | |||
637 | int len; | 637 | int len; |
638 | 638 | ||
639 | read_lock_bh(&queue_lock); | 639 | read_lock_bh(&queue_lock); |
640 | 640 | ||
641 | len = sprintf(buffer, | 641 | len = sprintf(buffer, |
642 | "Peer PID : %d\n" | 642 | "Peer PID : %d\n" |
643 | "Copy mode : %hu\n" | 643 | "Copy mode : %hu\n" |
644 | "Copy range : %u\n" | 644 | "Copy range : %u\n" |
645 | "Queue length : %u\n" | 645 | "Queue length : %u\n" |
646 | "Queue max. length : %u\n" | 646 | "Queue max. length : %u\n" |
647 | "Queue dropped : %u\n" | 647 | "Queue dropped : %u\n" |
648 | "Netlink dropped : %u\n", | 648 | "Netlink dropped : %u\n", |
649 | peer_pid, | 649 | peer_pid, |
650 | copy_mode, | 650 | copy_mode, |
651 | copy_range, | 651 | copy_range, |
652 | queue_total, | 652 | queue_total, |
653 | queue_maxlen, | 653 | queue_maxlen, |
654 | queue_dropped, | 654 | queue_dropped, |
655 | queue_user_dropped); | 655 | queue_user_dropped); |
656 | 656 | ||
657 | read_unlock_bh(&queue_lock); | 657 | read_unlock_bh(&queue_lock); |
658 | 658 | ||
659 | *start = buffer + offset; | 659 | *start = buffer + offset; |
660 | len -= offset; | 660 | len -= offset; |
661 | if (len > length) | 661 | if (len > length) |
@@ -675,7 +675,7 @@ static int __init ip_queue_init(void) | |||
675 | { | 675 | { |
676 | int status = -ENOMEM; | 676 | int status = -ENOMEM; |
677 | struct proc_dir_entry *proc; | 677 | struct proc_dir_entry *proc; |
678 | 678 | ||
679 | netlink_register_notifier(&ipq_nl_notifier); | 679 | netlink_register_notifier(&ipq_nl_notifier); |
680 | ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk, | 680 | ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk, |
681 | THIS_MODULE); | 681 | THIS_MODULE); |
@@ -691,10 +691,10 @@ static int __init ip_queue_init(void) | |||
691 | printk(KERN_ERR "ip_queue: failed to create proc entry\n"); | 691 | printk(KERN_ERR "ip_queue: failed to create proc entry\n"); |
692 | goto cleanup_ipqnl; | 692 | goto cleanup_ipqnl; |
693 | } | 693 | } |
694 | 694 | ||
695 | register_netdevice_notifier(&ipq_dev_notifier); | 695 | register_netdevice_notifier(&ipq_dev_notifier); |
696 | ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0); | 696 | ipq_sysctl_header = register_sysctl_table(ipq_root_table, 0); |
697 | 697 | ||
698 | status = nf_register_queue_handler(PF_INET, &nfqh); | 698 | status = nf_register_queue_handler(PF_INET, &nfqh); |
699 | if (status < 0) { | 699 | if (status < 0) { |
700 | printk(KERN_ERR "ip_queue: failed to register queue handler\n"); | 700 | printk(KERN_ERR "ip_queue: failed to register queue handler\n"); |
@@ -706,12 +706,12 @@ cleanup_sysctl: | |||
706 | unregister_sysctl_table(ipq_sysctl_header); | 706 | unregister_sysctl_table(ipq_sysctl_header); |
707 | unregister_netdevice_notifier(&ipq_dev_notifier); | 707 | unregister_netdevice_notifier(&ipq_dev_notifier); |
708 | proc_net_remove(IPQ_PROC_FS_NAME); | 708 | proc_net_remove(IPQ_PROC_FS_NAME); |
709 | 709 | ||
710 | cleanup_ipqnl: | 710 | cleanup_ipqnl: |
711 | sock_release(ipqnl->sk_socket); | 711 | sock_release(ipqnl->sk_socket); |
712 | mutex_lock(&ipqnl_mutex); | 712 | mutex_lock(&ipqnl_mutex); |
713 | mutex_unlock(&ipqnl_mutex); | 713 | mutex_unlock(&ipqnl_mutex); |
714 | 714 | ||
715 | cleanup_netlink_notifier: | 715 | cleanup_netlink_notifier: |
716 | netlink_unregister_notifier(&ipq_nl_notifier); | 716 | netlink_unregister_notifier(&ipq_nl_notifier); |
717 | return status; | 717 | return status; |
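
ipq_rcv_skb() above screens every netlink message before ipq_receive_peer() dispatches on IPQM_MODE or IPQM_VERDICT: a positive sender pid, a plain (non-multipart) request, no truncation, and a type inside the IPQM_* range. A standalone sketch of those checks, with the constants stubbed locally as illustrative values rather than taken from the kernel headers, could look like this:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the netlink/ip_queue constants used above;
 * the real values live in <linux/netlink.h> and
 * <linux/netfilter_ipv4/ip_queue.h>. */
#define NLM_F_REQUEST 0x01
#define NLM_F_MULTI   0x02
#define MSG_TRUNC     0x20
#define NLMSG_NOOP    1
#define IPQM_BASE     16
#define IPQM_MAX      19

/* Mirrors the checks ipq_rcv_skb() performs before handing the payload
 * to ipq_receive_peer(). */
static int validate_request(int pid, unsigned int flags, unsigned int type)
{
	if (pid <= 0 || !(flags & NLM_F_REQUEST) || (flags & NLM_F_MULTI))
		return -EINVAL;
	if (flags & MSG_TRUNC)
		return -ECOMM;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		return -EINVAL;
	if (type <= IPQM_BASE)
		return 0;	/* control message, silently ignored */
	return 1;		/* worth passing on to ipq_receive_peer() */
}

int main(void)
{
	printf("%d\n", validate_request(1234, NLM_F_REQUEST, IPQM_BASE + 2));
	return 0;
}
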
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 5a7b3a341389..50cc4b92e284 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -297,7 +297,7 @@ ipt_do_table(struct sk_buff **pskb, | |||
297 | e = get_entry(table_base, v); | 297 | e = get_entry(table_base, v); |
298 | } else { | 298 | } else { |
299 | /* Targets which reenter must return | 299 | /* Targets which reenter must return |
300 | abs. verdicts */ | 300 | abs. verdicts */ |
301 | #ifdef CONFIG_NETFILTER_DEBUG | 301 | #ifdef CONFIG_NETFILTER_DEBUG |
302 | ((struct ipt_entry *)table_base)->comefrom | 302 | ((struct ipt_entry *)table_base)->comefrom |
303 | = 0xeeeeeeec; | 303 | = 0xeeeeeeec; |
@@ -556,9 +556,9 @@ err: | |||
556 | 556 | ||
557 | static inline int check_target(struct ipt_entry *e, const char *name) | 557 | static inline int check_target(struct ipt_entry *e, const char *name) |
558 | { | 558 | { |
559 | struct ipt_entry_target *t; | 559 | struct ipt_entry_target *t; |
560 | struct xt_target *target; | 560 | struct xt_target *target; |
561 | int ret; | 561 | int ret; |
562 | 562 | ||
563 | t = ipt_get_target(e); | 563 | t = ipt_get_target(e); |
564 | target = t->u.kernel.target; | 564 | target = t->u.kernel.target; |
@@ -652,7 +652,7 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
652 | } | 652 | } |
653 | 653 | ||
654 | /* FIXME: underflows must be unconditional, standard verdicts | 654 | /* FIXME: underflows must be unconditional, standard verdicts |
655 | < 0 (not IPT_RETURN). --RR */ | 655 | < 0 (not IPT_RETURN). --RR */ |
656 | 656 | ||
657 | /* Clear counters and comefrom */ | 657 | /* Clear counters and comefrom */ |
658 | e->counters = ((struct xt_counters) { 0, 0 }); | 658 | e->counters = ((struct xt_counters) { 0, 0 }); |
@@ -2057,7 +2057,7 @@ void ipt_unregister_table(struct xt_table *table) | |||
2057 | struct xt_table_info *private; | 2057 | struct xt_table_info *private; |
2058 | void *loc_cpu_entry; | 2058 | void *loc_cpu_entry; |
2059 | 2059 | ||
2060 | private = xt_unregister_table(table); | 2060 | private = xt_unregister_table(table); |
2061 | 2061 | ||
2062 | /* Decrease module usage counts and free resources */ | 2062 | /* Decrease module usage counts and free resources */ |
2063 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; | 2063 | loc_cpu_entry = private->entries[raw_smp_processor_id()]; |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 343c2abdc1a0..4fe28f264475 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Cluster IP hashmark target | 1 | /* Cluster IP hashmark target |
2 | * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> | 2 | * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> |
3 | * based on ideas of Fabio Olive Leite <olive@unixforge.org> | 3 | * based on ideas of Fabio Olive Leite <olive@unixforge.org> |
4 | * | 4 | * |
@@ -123,7 +123,7 @@ __clusterip_config_find(__be32 clusterip) | |||
123 | struct list_head *pos; | 123 | struct list_head *pos; |
124 | 124 | ||
125 | list_for_each(pos, &clusterip_configs) { | 125 | list_for_each(pos, &clusterip_configs) { |
126 | struct clusterip_config *c = list_entry(pos, | 126 | struct clusterip_config *c = list_entry(pos, |
127 | struct clusterip_config, list); | 127 | struct clusterip_config, list); |
128 | if (c->clusterip == clusterip) { | 128 | if (c->clusterip == clusterip) { |
129 | return c; | 129 | return c; |
@@ -229,7 +229,7 @@ clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum) | |||
229 | if (nodenum == 0 || | 229 | if (nodenum == 0 || |
230 | nodenum > c->num_total_nodes) | 230 | nodenum > c->num_total_nodes) |
231 | return 1; | 231 | return 1; |
232 | 232 | ||
233 | if (test_and_clear_bit(nodenum - 1, &c->local_nodes)) | 233 | if (test_and_clear_bit(nodenum - 1, &c->local_nodes)) |
234 | return 0; | 234 | return 0; |
235 | 235 | ||
@@ -270,7 +270,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config) | |||
270 | config->hash_initval); | 270 | config->hash_initval); |
271 | break; | 271 | break; |
272 | case CLUSTERIP_HASHMODE_SIP_SPT: | 272 | case CLUSTERIP_HASHMODE_SIP_SPT: |
273 | hashval = jhash_2words(ntohl(iph->saddr), sport, | 273 | hashval = jhash_2words(ntohl(iph->saddr), sport, |
274 | config->hash_initval); | 274 | config->hash_initval); |
275 | break; | 275 | break; |
276 | case CLUSTERIP_HASHMODE_SIP_SPT_DPT: | 276 | case CLUSTERIP_HASHMODE_SIP_SPT_DPT: |
@@ -297,8 +297,8 @@ clusterip_responsible(struct clusterip_config *config, u_int32_t hash) | |||
297 | return test_bit(hash - 1, &config->local_nodes); | 297 | return test_bit(hash - 1, &config->local_nodes); |
298 | } | 298 | } |
299 | 299 | ||
300 | /*********************************************************************** | 300 | /*********************************************************************** |
301 | * IPTABLES TARGET | 301 | * IPTABLES TARGET |
302 | ***********************************************************************/ | 302 | ***********************************************************************/ |
303 | 303 | ||
304 | static unsigned int | 304 | static unsigned int |
@@ -321,7 +321,7 @@ target(struct sk_buff **pskb, | |||
321 | if (mark == NULL) { | 321 | if (mark == NULL) { |
322 | printk(KERN_ERR "CLUSTERIP: no conntrack!\n"); | 322 | printk(KERN_ERR "CLUSTERIP: no conntrack!\n"); |
323 | /* FIXME: need to drop invalid ones, since replies | 323 | /* FIXME: need to drop invalid ones, since replies |
324 | * to outgoing connections of other nodes will be | 324 | * to outgoing connections of other nodes will be |
325 | * marked as INVALID */ | 325 | * marked as INVALID */ |
326 | return NF_DROP; | 326 | return NF_DROP; |
327 | } | 327 | } |
@@ -329,11 +329,11 @@ target(struct sk_buff **pskb, | |||
329 | /* special case: ICMP error handling. conntrack distinguishes between | 329 | /* special case: ICMP error handling. conntrack distinguishes between |
330 | * error messages (RELATED) and information requests (see below) */ | 330 | * error messages (RELATED) and information requests (see below) */ |
331 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP | 331 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP |
332 | && (ctinfo == IP_CT_RELATED | 332 | && (ctinfo == IP_CT_RELATED |
333 | || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) | 333 | || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY)) |
334 | return XT_CONTINUE; | 334 | return XT_CONTINUE; |
335 | 335 | ||
336 | /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, | 336 | /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, |
337 | * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here | 337 | * TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here |
338 | * on, which all have an ID field [relevant for hashing]. */ | 338 | * on, which all have an ID field [relevant for hashing]. */ |
339 | 339 | ||
@@ -376,8 +376,8 @@ static int | |||
376 | checkentry(const char *tablename, | 376 | checkentry(const char *tablename, |
377 | const void *e_void, | 377 | const void *e_void, |
378 | const struct xt_target *target, | 378 | const struct xt_target *target, |
379 | void *targinfo, | 379 | void *targinfo, |
380 | unsigned int hook_mask) | 380 | unsigned int hook_mask) |
381 | { | 381 | { |
382 | struct ipt_clusterip_tgt_info *cipinfo = targinfo; | 382 | struct ipt_clusterip_tgt_info *cipinfo = targinfo; |
383 | const struct ipt_entry *e = e_void; | 383 | const struct ipt_entry *e = e_void; |
@@ -437,7 +437,7 @@ checkentry(const char *tablename, | |||
437 | return 0; | 437 | return 0; |
438 | } | 438 | } |
439 | 439 | ||
440 | config = clusterip_config_init(cipinfo, | 440 | config = clusterip_config_init(cipinfo, |
441 | e->ip.dst.s_addr, dev); | 441 | e->ip.dst.s_addr, dev); |
442 | if (!config) { | 442 | if (!config) { |
443 | printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n"); | 443 | printk(KERN_WARNING "CLUSTERIP: cannot allocate config\n"); |
@@ -483,8 +483,8 @@ static struct xt_target clusterip_tgt = { | |||
483 | }; | 483 | }; |
484 | 484 | ||
485 | 485 | ||
486 | /*********************************************************************** | 486 | /*********************************************************************** |
487 | * ARP MANGLING CODE | 487 | * ARP MANGLING CODE |
488 | ***********************************************************************/ | 488 | ***********************************************************************/ |
489 | 489 | ||
490 | /* hardcoded for 48bit ethernet and 32bit ipv4 addresses */ | 490 | /* hardcoded for 48bit ethernet and 32bit ipv4 addresses */ |
@@ -496,7 +496,7 @@ struct arp_payload { | |||
496 | } __attribute__ ((packed)); | 496 | } __attribute__ ((packed)); |
497 | 497 | ||
498 | #ifdef CLUSTERIP_DEBUG | 498 | #ifdef CLUSTERIP_DEBUG |
499 | static void arp_print(struct arp_payload *payload) | 499 | static void arp_print(struct arp_payload *payload) |
500 | { | 500 | { |
501 | #define HBUFFERLEN 30 | 501 | #define HBUFFERLEN 30 |
502 | char hbuffer[HBUFFERLEN]; | 502 | char hbuffer[HBUFFERLEN]; |
@@ -510,7 +510,7 @@ static void arp_print(struct arp_payload *payload) | |||
510 | } | 510 | } |
511 | hbuffer[--k]='\0'; | 511 | hbuffer[--k]='\0'; |
512 | 512 | ||
513 | printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n", | 513 | printk("src %u.%u.%u.%u@%s, dst %u.%u.%u.%u\n", |
514 | NIPQUAD(payload->src_ip), hbuffer, | 514 | NIPQUAD(payload->src_ip), hbuffer, |
515 | NIPQUAD(payload->dst_ip)); | 515 | NIPQUAD(payload->dst_ip)); |
516 | } | 516 | } |
@@ -540,13 +540,13 @@ arp_mangle(unsigned int hook, | |||
540 | 540 | ||
541 | payload = (void *)(arp+1); | 541 | payload = (void *)(arp+1); |
542 | 542 | ||
543 | /* if there is no clusterip configuration for the arp reply's | 543 | /* if there is no clusterip configuration for the arp reply's |
544 | * source ip, we don't want to mangle it */ | 544 | * source ip, we don't want to mangle it */ |
545 | c = clusterip_config_find_get(payload->src_ip, 0); | 545 | c = clusterip_config_find_get(payload->src_ip, 0); |
546 | if (!c) | 546 | if (!c) |
547 | return NF_ACCEPT; | 547 | return NF_ACCEPT; |
548 | 548 | ||
549 | /* normally the linux kernel always replies to arp queries of | 549 | /* normally the linux kernel always replies to arp queries of |
550 | * addresses on different interfacs. However, in the CLUSTERIP case | 550 | * addresses on different interfacs. However, in the CLUSTERIP case |
551 | * this wouldn't work, since we didn't subscribe the mcast group on | 551 | * this wouldn't work, since we didn't subscribe the mcast group on |
552 | * other interfaces */ | 552 | * other interfaces */ |
@@ -577,8 +577,8 @@ static struct nf_hook_ops cip_arp_ops = { | |||
577 | .priority = -1 | 577 | .priority = -1 |
578 | }; | 578 | }; |
579 | 579 | ||
580 | /*********************************************************************** | 580 | /*********************************************************************** |
581 | * PROC DIR HANDLING | 581 | * PROC DIR HANDLING |
582 | ***********************************************************************/ | 582 | ***********************************************************************/ |
583 | 583 | ||
584 | #ifdef CONFIG_PROC_FS | 584 | #ifdef CONFIG_PROC_FS |
@@ -640,7 +640,7 @@ static int clusterip_seq_show(struct seq_file *s, void *v) | |||
640 | { | 640 | { |
641 | struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v; | 641 | struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v; |
642 | 642 | ||
643 | if (idx->pos != 0) | 643 | if (idx->pos != 0) |
644 | seq_putc(s, ','); | 644 | seq_putc(s, ','); |
645 | 645 | ||
646 | seq_printf(s, "%u", idx->bit); | 646 | seq_printf(s, "%u", idx->bit); |
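
The CLUSTERIP target above hashes selected header fields (source IP, optionally source/destination port) and only keeps packets whose hash maps to a node number set in local_nodes. The reduction from the 32-bit hash to a node number is not visible in these hunks; the sketch below assumes a simple modulo and uses a toy hash in place of the kernel's jhash, purely to show the accept/drop decision:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for jhash_1word(); CLUSTERIP really uses the kernel's
 * jhash seeded with hash_initval. */
static uint32_t toy_hash(uint32_t saddr, uint32_t initval)
{
	uint32_t h = saddr ^ initval;
	h ^= h >> 16;
	h *= 0x45d9f3b;
	h ^= h >> 16;
	return h;
}

/* Assumed reduction: map the hash onto node numbers 1..num_total_nodes. */
static unsigned int node_for(uint32_t saddr, uint32_t initval,
			     unsigned int num_total_nodes)
{
	return (toy_hash(saddr, initval) % num_total_nodes) + 1;
}

/* Mirrors clusterip_responsible(): node n is "ours" if bit n-1 is set. */
static int responsible(unsigned long local_nodes, unsigned int node)
{
	return (local_nodes >> (node - 1)) & 1UL;
}

int main(void)
{
	unsigned long local_nodes = 1UL << 0;	/* this host is node 1 */
	uint32_t saddr = 0xc0a80001;		/* 192.168.0.1 */
	unsigned int node = node_for(saddr, 0xdeadbeef, 2);

	printf("node %u -> %s\n", node,
	       responsible(local_nodes, node) ? "accept" : "drop");
	return 0;
}
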
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index b5ca5938d1fe..4f565633631d 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* iptables module for the IPv4 and TCP ECN bits, Version 1.5 | 1 | /* iptables module for the IPv4 and TCP ECN bits, Version 1.5 |
2 | * | 2 | * |
3 | * (C) 2002 by Harald Welte <laforge@netfilter.org> | 3 | * (C) 2002 by Harald Welte <laforge@netfilter.org> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp | 9 | * ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp |
@@ -40,7 +40,7 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo) | |||
40 | iph->tos &= ~IPT_ECN_IP_MASK; | 40 | iph->tos &= ~IPT_ECN_IP_MASK; |
41 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); | 41 | iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK); |
42 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); | 42 | nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos)); |
43 | } | 43 | } |
44 | return 1; | 44 | return 1; |
45 | } | 45 | } |
46 | 46 | ||
@@ -104,8 +104,8 @@ static int | |||
104 | checkentry(const char *tablename, | 104 | checkentry(const char *tablename, |
105 | const void *e_void, | 105 | const void *e_void, |
106 | const struct xt_target *target, | 106 | const struct xt_target *target, |
107 | void *targinfo, | 107 | void *targinfo, |
108 | unsigned int hook_mask) | 108 | unsigned int hook_mask) |
109 | { | 109 | { |
110 | const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo; | 110 | const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo; |
111 | const struct ipt_entry *e = e_void; | 111 | const struct ipt_entry *e = e_void; |
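
set_ect_ip() above rewrites only the two ECN bits of the IPv4 TOS byte and then patches the header checksum incrementally with nf_csum_replace2(). The bit manipulation itself is a masked merge; a minimal sketch (checksum update omitted, mask value assumed) is:

#include <stdint.h>
#include <stdio.h>

#define ECN_IP_MASK 0x03	/* assumed: the two low-order ECN bits of TOS */

/* Replace only the ECN bits of tos with the configured ip_ect value,
 * as set_ect_ip() does before fixing up iph->check. */
static uint8_t apply_ecn(uint8_t tos, uint8_t ip_ect)
{
	tos &= (uint8_t)~ECN_IP_MASK;
	tos |= ip_ect & ECN_IP_MASK;
	return tos;
}

int main(void)
{
	printf("0x%02x\n", apply_ecn(0xb8, 0x01));	/* keeps DSCP, sets ECT(1) */
	return 0;
}
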
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index f68370ffb43f..f4a62f2522ff 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -289,7 +289,7 @@ static void dump_packet(const struct nf_loginfo *info, | |||
289 | 289 | ||
290 | if (ntohs(ih->frag_off) & IP_OFFSET) | 290 | if (ntohs(ih->frag_off) & IP_OFFSET) |
291 | break; | 291 | break; |
292 | 292 | ||
293 | /* Max length: 9 "PROTO=AH " */ | 293 | /* Max length: 9 "PROTO=AH " */ |
294 | printk("PROTO=AH "); | 294 | printk("PROTO=AH "); |
295 | 295 | ||
@@ -334,10 +334,10 @@ static void dump_packet(const struct nf_loginfo *info, | |||
334 | } | 334 | } |
335 | 335 | ||
336 | /* Max length: 15 "UID=4294967295 " */ | 336 | /* Max length: 15 "UID=4294967295 " */ |
337 | if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) { | 337 | if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) { |
338 | read_lock_bh(&skb->sk->sk_callback_lock); | 338 | read_lock_bh(&skb->sk->sk_callback_lock); |
339 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) | 339 | if (skb->sk->sk_socket && skb->sk->sk_socket->file) |
340 | printk("UID=%u ", skb->sk->sk_socket->file->f_uid); | 340 | printk("UID=%u ", skb->sk->sk_socket->file->f_uid); |
341 | read_unlock_bh(&skb->sk->sk_callback_lock); | 341 | read_unlock_bh(&skb->sk->sk_callback_lock); |
342 | } | 342 | } |
343 | 343 | ||
@@ -431,7 +431,7 @@ ipt_log_target(struct sk_buff **pskb, | |||
431 | li.u.log.logflags = loginfo->logflags; | 431 | li.u.log.logflags = loginfo->logflags; |
432 | 432 | ||
433 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | 433 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, |
434 | loginfo->prefix); | 434 | loginfo->prefix); |
435 | return XT_CONTINUE; | 435 | return XT_CONTINUE; |
436 | } | 436 | } |
437 | 437 | ||
@@ -483,7 +483,7 @@ static int __init ipt_log_init(void) | |||
483 | /* we cannot make module load fail here, since otherwise | 483 | /* we cannot make module load fail here, since otherwise |
484 | * iptables userspace would abort */ | 484 | * iptables userspace would abort */ |
485 | } | 485 | } |
486 | 486 | ||
487 | return 0; | 487 | return 0; |
488 | } | 488 | } |
489 | 489 | ||
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 91c42efcd533..b5955f3a3f8f 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -86,7 +86,7 @@ masquerade_target(struct sk_buff **pskb, | |||
86 | nat = nfct_nat(ct); | 86 | nat = nfct_nat(ct); |
87 | #endif | 87 | #endif |
88 | IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED | 88 | IP_NF_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED |
89 | || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); | 89 | || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); |
90 | 90 | ||
91 | /* Source address is 0.0.0.0 - locally generated packet that is | 91 | /* Source address is 0.0.0.0 - locally generated packet that is |
92 | * probably not supposed to be masqueraded. | 92 | * probably not supposed to be masqueraded. |
@@ -221,7 +221,7 @@ static void __exit ipt_masquerade_fini(void) | |||
221 | { | 221 | { |
222 | xt_unregister_target(&masquerade); | 222 | xt_unregister_target(&masquerade); |
223 | unregister_netdevice_notifier(&masq_dev_notifier); | 223 | unregister_netdevice_notifier(&masq_dev_notifier); |
224 | unregister_inetaddr_notifier(&masq_inet_notifier); | 224 | unregister_inetaddr_notifier(&masq_inet_notifier); |
225 | } | 225 | } |
226 | 226 | ||
227 | module_init(ipt_masquerade_init); | 227 | module_init(ipt_masquerade_init); |
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c index b4acc241d898..fd7aaa347cd8 100644 --- a/net/ipv4/netfilter/ipt_NETMAP.c +++ b/net/ipv4/netfilter/ipt_NETMAP.c | |||
@@ -92,13 +92,13 @@ target(struct sk_buff **pskb, | |||
92 | static struct xt_target target_module = { | 92 | static struct xt_target target_module = { |
93 | .name = MODULENAME, | 93 | .name = MODULENAME, |
94 | .family = AF_INET, | 94 | .family = AF_INET, |
95 | .target = target, | 95 | .target = target, |
96 | .targetsize = sizeof(struct ip_nat_multi_range_compat), | 96 | .targetsize = sizeof(struct ip_nat_multi_range_compat), |
97 | .table = "nat", | 97 | .table = "nat", |
98 | .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) | | 98 | .hooks = (1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_POST_ROUTING) | |
99 | (1 << NF_IP_LOCAL_OUT), | 99 | (1 << NF_IP_LOCAL_OUT), |
100 | .checkentry = check, | 100 | .checkentry = check, |
101 | .me = THIS_MODULE | 101 | .me = THIS_MODULE |
102 | }; | 102 | }; |
103 | 103 | ||
104 | static int __init ipt_netmap_init(void) | 104 | static int __init ipt_netmap_init(void) |
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index 54cd021aa5a8..c2b6b80670f8 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c | |||
@@ -84,7 +84,7 @@ redirect_target(struct sk_buff **pskb, | |||
84 | struct in_ifaddr *ifa; | 84 | struct in_ifaddr *ifa; |
85 | 85 | ||
86 | newdst = 0; | 86 | newdst = 0; |
87 | 87 | ||
88 | rcu_read_lock(); | 88 | rcu_read_lock(); |
89 | indev = __in_dev_get_rcu((*pskb)->dev); | 89 | indev = __in_dev_get_rcu((*pskb)->dev); |
90 | if (indev && (ifa = indev->ifa_list)) | 90 | if (indev && (ifa = indev->ifa_list)) |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index e4a1ddb386a7..a9eb3635fff2 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -57,7 +57,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
57 | oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4, | 57 | oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4, |
58 | sizeof(_otcph), &_otcph); | 58 | sizeof(_otcph), &_otcph); |
59 | if (oth == NULL) | 59 | if (oth == NULL) |
60 | return; | 60 | return; |
61 | 61 | ||
62 | /* No RST for RST. */ | 62 | /* No RST for RST. */ |
63 | if (oth->rst) | 63 | if (oth->rst) |
@@ -145,7 +145,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
145 | 145 | ||
146 | /* Adjust IP checksum */ | 146 | /* Adjust IP checksum */ |
147 | nskb->nh.iph->check = 0; | 147 | nskb->nh.iph->check = 0; |
148 | nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, | 148 | nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, |
149 | nskb->nh.iph->ihl); | 149 | nskb->nh.iph->ihl); |
150 | 150 | ||
151 | /* "Never happens" */ | 151 | /* "Never happens" */ |
@@ -165,7 +165,7 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
165 | static inline void send_unreach(struct sk_buff *skb_in, int code) | 165 | static inline void send_unreach(struct sk_buff *skb_in, int code) |
166 | { | 166 | { |
167 | icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); | 167 | icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); |
168 | } | 168 | } |
169 | 169 | ||
170 | static unsigned int reject(struct sk_buff **pskb, | 170 | static unsigned int reject(struct sk_buff **pskb, |
171 | const struct net_device *in, | 171 | const struct net_device *in, |
@@ -177,33 +177,33 @@ static unsigned int reject(struct sk_buff **pskb, | |||
177 | const struct ipt_reject_info *reject = targinfo; | 177 | const struct ipt_reject_info *reject = targinfo; |
178 | 178 | ||
179 | /* Our naive response construction doesn't deal with IP | 179 | /* Our naive response construction doesn't deal with IP |
180 | options, and probably shouldn't try. */ | 180 | options, and probably shouldn't try. */ |
181 | if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr)) | 181 | if ((*pskb)->nh.iph->ihl<<2 != sizeof(struct iphdr)) |
182 | return NF_DROP; | 182 | return NF_DROP; |
183 | 183 | ||
184 | /* WARNING: This code causes reentry within iptables. | 184 | /* WARNING: This code causes reentry within iptables. |
185 | This means that the iptables jump stack is now crap. We | 185 | This means that the iptables jump stack is now crap. We |
186 | must return an absolute verdict. --RR */ | 186 | must return an absolute verdict. --RR */ |
187 | switch (reject->with) { | 187 | switch (reject->with) { |
188 | case IPT_ICMP_NET_UNREACHABLE: | 188 | case IPT_ICMP_NET_UNREACHABLE: |
189 | send_unreach(*pskb, ICMP_NET_UNREACH); | 189 | send_unreach(*pskb, ICMP_NET_UNREACH); |
190 | break; | 190 | break; |
191 | case IPT_ICMP_HOST_UNREACHABLE: | 191 | case IPT_ICMP_HOST_UNREACHABLE: |
192 | send_unreach(*pskb, ICMP_HOST_UNREACH); | 192 | send_unreach(*pskb, ICMP_HOST_UNREACH); |
193 | break; | 193 | break; |
194 | case IPT_ICMP_PROT_UNREACHABLE: | 194 | case IPT_ICMP_PROT_UNREACHABLE: |
195 | send_unreach(*pskb, ICMP_PROT_UNREACH); | 195 | send_unreach(*pskb, ICMP_PROT_UNREACH); |
196 | break; | 196 | break; |
197 | case IPT_ICMP_PORT_UNREACHABLE: | 197 | case IPT_ICMP_PORT_UNREACHABLE: |
198 | send_unreach(*pskb, ICMP_PORT_UNREACH); | 198 | send_unreach(*pskb, ICMP_PORT_UNREACH); |
199 | break; | 199 | break; |
200 | case IPT_ICMP_NET_PROHIBITED: | 200 | case IPT_ICMP_NET_PROHIBITED: |
201 | send_unreach(*pskb, ICMP_NET_ANO); | 201 | send_unreach(*pskb, ICMP_NET_ANO); |
202 | break; | 202 | break; |
203 | case IPT_ICMP_HOST_PROHIBITED: | 203 | case IPT_ICMP_HOST_PROHIBITED: |
204 | send_unreach(*pskb, ICMP_HOST_ANO); | 204 | send_unreach(*pskb, ICMP_HOST_ANO); |
205 | break; | 205 | break; |
206 | case IPT_ICMP_ADMIN_PROHIBITED: | 206 | case IPT_ICMP_ADMIN_PROHIBITED: |
207 | send_unreach(*pskb, ICMP_PKT_FILTERED); | 207 | send_unreach(*pskb, ICMP_PKT_FILTERED); |
208 | break; | 208 | break; |
209 | case IPT_TCP_RESET: | 209 | case IPT_TCP_RESET: |
@@ -222,7 +222,7 @@ static int check(const char *tablename, | |||
222 | void *targinfo, | 222 | void *targinfo, |
223 | unsigned int hook_mask) | 223 | unsigned int hook_mask) |
224 | { | 224 | { |
225 | const struct ipt_reject_info *rejinfo = targinfo; | 225 | const struct ipt_reject_info *rejinfo = targinfo; |
226 | const struct ipt_entry *e = e_void; | 226 | const struct ipt_entry *e = e_void; |
227 | 227 | ||
228 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { | 228 | if (rejinfo->with == IPT_ICMP_ECHOREPLY) { |
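
The reject() switch above is a one-to-one mapping from the rule's reject type to an ICMP destination-unreachable code, with TCP RST handled separately by send_reset(). A table-driven version of the same mapping, using assumed enum names since the real ones come from the ipt_REJECT and ICMP headers, shows how mechanical that translation is:

#include <stdio.h>

/* Assumed stand-ins for the IPT_ICMP_* rule options; the ICMP code
 * values in the comments are the standard destination-unreachable codes. */
enum reject_with {
	R_NET_UNREACHABLE,
	R_HOST_UNREACHABLE,
	R_PROT_UNREACHABLE,
	R_PORT_UNREACHABLE,
	R_NET_PROHIBITED,
	R_HOST_PROHIBITED,
	R_ADMIN_PROHIBITED,
	R_MAX
};

static const int icmp_code[R_MAX] = {
	[R_NET_UNREACHABLE]  = 0,	/* ICMP_NET_UNREACH */
	[R_HOST_UNREACHABLE] = 1,	/* ICMP_HOST_UNREACH */
	[R_PROT_UNREACHABLE] = 2,	/* ICMP_PROT_UNREACH */
	[R_PORT_UNREACHABLE] = 3,	/* ICMP_PORT_UNREACH */
	[R_NET_PROHIBITED]   = 9,	/* ICMP_NET_ANO */
	[R_HOST_PROHIBITED]  = 10,	/* ICMP_HOST_ANO */
	[R_ADMIN_PROHIBITED] = 13,	/* ICMP_PKT_FILTERED */
};

int main(void)
{
	printf("port-unreachable -> ICMP code %d\n",
	       icmp_code[R_PORT_UNREACHABLE]);
	return 0;
}

The explicit switch in reject() is the kernel's style; the table form just makes it obvious that no other logic hides in the translation.
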
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c index a1cdd1262de2..bd4404e5c688 100644 --- a/net/ipv4/netfilter/ipt_SAME.c +++ b/net/ipv4/netfilter/ipt_SAME.c | |||
@@ -87,24 +87,24 @@ same_check(const char *tablename, | |||
87 | DEBUGP("same_check: bad MAP_IPS.\n"); | 87 | DEBUGP("same_check: bad MAP_IPS.\n"); |
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
90 | rangeip = (ntohl(mr->range[count].max_ip) - | 90 | rangeip = (ntohl(mr->range[count].max_ip) - |
91 | ntohl(mr->range[count].min_ip) + 1); | 91 | ntohl(mr->range[count].min_ip) + 1); |
92 | mr->ipnum += rangeip; | 92 | mr->ipnum += rangeip; |
93 | 93 | ||
94 | DEBUGP("same_check: range %u, ipnum = %u\n", count, rangeip); | 94 | DEBUGP("same_check: range %u, ipnum = %u\n", count, rangeip); |
95 | } | 95 | } |
96 | DEBUGP("same_check: total ipaddresses = %u\n", mr->ipnum); | 96 | DEBUGP("same_check: total ipaddresses = %u\n", mr->ipnum); |
97 | 97 | ||
98 | mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL); | 98 | mr->iparray = kmalloc((sizeof(u_int32_t) * mr->ipnum), GFP_KERNEL); |
99 | if (!mr->iparray) { | 99 | if (!mr->iparray) { |
100 | DEBUGP("same_check: Couldn't allocate %u bytes " | 100 | DEBUGP("same_check: Couldn't allocate %u bytes " |
101 | "for %u ipaddresses!\n", | 101 | "for %u ipaddresses!\n", |
102 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); | 102 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); |
103 | return 0; | 103 | return 0; |
104 | } | 104 | } |
105 | DEBUGP("same_check: Allocated %u bytes for %u ipaddresses.\n", | 105 | DEBUGP("same_check: Allocated %u bytes for %u ipaddresses.\n", |
106 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); | 106 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); |
107 | 107 | ||
108 | for (count = 0; count < mr->rangesize; count++) { | 108 | for (count = 0; count < mr->rangesize; count++) { |
109 | for (countess = ntohl(mr->range[count].min_ip); | 109 | for (countess = ntohl(mr->range[count].min_ip); |
110 | countess <= ntohl(mr->range[count].max_ip); | 110 | countess <= ntohl(mr->range[count].max_ip); |
@@ -119,13 +119,13 @@ same_check(const char *tablename, | |||
119 | return 1; | 119 | return 1; |
120 | } | 120 | } |
121 | 121 | ||
122 | static void | 122 | static void |
123 | same_destroy(const struct xt_target *target, void *targinfo) | 123 | same_destroy(const struct xt_target *target, void *targinfo) |
124 | { | 124 | { |
125 | struct ipt_same_info *mr = targinfo; | 125 | struct ipt_same_info *mr = targinfo; |
126 | 126 | ||
127 | kfree(mr->iparray); | 127 | kfree(mr->iparray); |
128 | 128 | ||
129 | DEBUGP("same_destroy: Deallocated %u bytes for %u ipaddresses.\n", | 129 | DEBUGP("same_destroy: Deallocated %u bytes for %u ipaddresses.\n", |
130 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); | 130 | (sizeof(u_int32_t) * mr->ipnum), mr->ipnum); |
131 | } | 131 | } |
@@ -156,7 +156,7 @@ same_target(struct sk_buff **pskb, | |||
156 | giving some hope for consistency across reboots. | 156 | giving some hope for consistency across reboots. |
157 | Here we calculate the index in same->iparray which | 157 | Here we calculate the index in same->iparray which |
158 | holds the ipaddress we should use */ | 158 | holds the ipaddress we should use */ |
159 | 159 | ||
160 | #ifdef CONFIG_NF_NAT_NEEDED | 160 | #ifdef CONFIG_NF_NAT_NEEDED |
161 | tmpip = ntohl(t->src.u3.ip); | 161 | tmpip = ntohl(t->src.u3.ip); |
162 | 162 | ||
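
same_check() above expands the configured address ranges into a flat iparray, and same_target() later indexes that array from the connection's original source address so a given client keeps getting the same mapped IP across connections. The selection step itself is not visible in these hunks; the sketch below assumes a simple modulo over the expanded pool:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Expand an inclusive [min,max] range (host byte order) into a flat array,
 * as same_check() does for each configured range. */
static uint32_t *expand_range(uint32_t min, uint32_t max, unsigned int *ipnum)
{
	unsigned int n = max - min + 1, i;
	uint32_t *arr = malloc(n * sizeof(*arr));

	if (!arr)
		return NULL;
	for (i = 0; i < n; i++)
		arr[i] = min + i;
	*ipnum = n;
	return arr;
}

int main(void)
{
	unsigned int ipnum;
	uint32_t *iparray = expand_range(0x0a000001, 0x0a000004, &ipnum);
	uint32_t src = 0xc0a80042;	/* client source address, host order */

	if (!iparray)
		return 1;
	/* Assumed selection: index by source address modulo the pool size. */
	printf("mapped to 10.0.0.%u\n",
	       (unsigned int)(iparray[src % ipnum] & 0xff));
	free(iparray);
	return 0;
}
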
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c index 29b05a6bd108..cedf9f7d9d6e 100644 --- a/net/ipv4/netfilter/ipt_TOS.c +++ b/net/ipv4/netfilter/ipt_TOS.c | |||
@@ -47,8 +47,8 @@ static int | |||
47 | checkentry(const char *tablename, | 47 | checkentry(const char *tablename, |
48 | const void *e_void, | 48 | const void *e_void, |
49 | const struct xt_target *target, | 49 | const struct xt_target *target, |
50 | void *targinfo, | 50 | void *targinfo, |
51 | unsigned int hook_mask) | 51 | unsigned int hook_mask) |
52 | { | 52 | { |
53 | const u_int8_t tos = ((struct ipt_tos_target_info *)targinfo)->tos; | 53 | const u_int8_t tos = ((struct ipt_tos_target_info *)targinfo)->tos; |
54 | 54 | ||
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c index d2b6fa3f9dcd..64be31c22ba9 100644 --- a/net/ipv4/netfilter/ipt_TTL.c +++ b/net/ipv4/netfilter/ipt_TTL.c | |||
@@ -19,7 +19,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |||
19 | MODULE_DESCRIPTION("IP tables TTL modification module"); | 19 | MODULE_DESCRIPTION("IP tables TTL modification module"); |
20 | MODULE_LICENSE("GPL"); | 20 | MODULE_LICENSE("GPL"); |
21 | 21 | ||
22 | static unsigned int | 22 | static unsigned int |
23 | ipt_ttl_target(struct sk_buff **pskb, | 23 | ipt_ttl_target(struct sk_buff **pskb, |
24 | const struct net_device *in, const struct net_device *out, | 24 | const struct net_device *in, const struct net_device *out, |
25 | unsigned int hooknum, const struct xt_target *target, | 25 | unsigned int hooknum, const struct xt_target *target, |
@@ -71,7 +71,7 @@ static int ipt_ttl_checkentry(const char *tablename, | |||
71 | struct ipt_TTL_info *info = targinfo; | 71 | struct ipt_TTL_info *info = targinfo; |
72 | 72 | ||
73 | if (info->mode > IPT_TTL_MAXMODE) { | 73 | if (info->mode > IPT_TTL_MAXMODE) { |
74 | printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", | 74 | printk(KERN_WARNING "ipt_TTL: invalid or unknown Mode %u\n", |
75 | info->mode); | 75 | info->mode); |
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
@@ -83,10 +83,10 @@ static int ipt_ttl_checkentry(const char *tablename, | |||
83 | static struct xt_target ipt_TTL = { | 83 | static struct xt_target ipt_TTL = { |
84 | .name = "TTL", | 84 | .name = "TTL", |
85 | .family = AF_INET, | 85 | .family = AF_INET, |
86 | .target = ipt_ttl_target, | 86 | .target = ipt_ttl_target, |
87 | .targetsize = sizeof(struct ipt_TTL_info), | 87 | .targetsize = sizeof(struct ipt_TTL_info), |
88 | .table = "mangle", | 88 | .table = "mangle", |
89 | .checkentry = ipt_ttl_checkentry, | 89 | .checkentry = ipt_ttl_checkentry, |
90 | .me = THIS_MODULE, | 90 | .me = THIS_MODULE, |
91 | }; | 91 | }; |
92 | 92 | ||
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 7af57a3a1f36..3a1eacc634b3 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c | |||
@@ -4,9 +4,9 @@ | |||
4 | * (C) 2000-2004 by Harald Welte <laforge@netfilter.org> | 4 | * (C) 2000-2004 by Harald Welte <laforge@netfilter.org> |
5 | * | 5 | * |
6 | * 2000/09/22 ulog-cprange feature added | 6 | * 2000/09/22 ulog-cprange feature added |
7 | * 2001/01/04 in-kernel queue as proposed by Sebastian Zander | 7 | * 2001/01/04 in-kernel queue as proposed by Sebastian Zander |
8 | * <zander@fokus.gmd.de> | 8 | * <zander@fokus.gmd.de> |
9 | * 2001/01/30 per-rule nlgroup conflicts with global queue. | 9 | * 2001/01/30 per-rule nlgroup conflicts with global queue. |
10 | * nlgroup now global (sysctl) | 10 | * nlgroup now global (sysctl) |
11 | * 2001/04/19 ulog-queue reworked, now fixed buffer size specified at | 11 | * 2001/04/19 ulog-queue reworked, now fixed buffer size specified at |
12 | * module loadtime -HW | 12 | * module loadtime -HW |
@@ -23,8 +23,8 @@ | |||
23 | * it under the terms of the GNU General Public License version 2 as | 23 | * it under the terms of the GNU General Public License version 2 as |
24 | * published by the Free Software Foundation. | 24 | * published by the Free Software Foundation. |
25 | * | 25 | * |
26 | * This module accepts two parameters: | 26 | * This module accepts two parameters: |
27 | * | 27 | * |
28 | * nlbufsiz: | 28 | * nlbufsiz: |
29 | * The parameter specifies how big the buffer for each netlink multicast | 29 | * The parameter specifies how big the buffer for each netlink multicast |
30 | * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will | 30 | * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will |
@@ -72,7 +72,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG); | |||
72 | 72 | ||
73 | #if 0 | 73 | #if 0 |
74 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ | 74 | #define DEBUGP(format, args...) printk("%s:%s:" format, \ |
75 | __FILE__, __FUNCTION__ , ## args) | 75 | __FILE__, __FUNCTION__ , ## args) |
76 | #else | 76 | #else |
77 | #define DEBUGP(format, args...) | 77 | #define DEBUGP(format, args...) |
78 | #endif | 78 | #endif |
@@ -162,7 +162,7 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size) | |||
162 | PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n); | 162 | PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n); |
163 | 163 | ||
164 | if (n > size) { | 164 | if (n > size) { |
165 | /* try to allocate only as much as we need for | 165 | /* try to allocate only as much as we need for |
166 | * current packet */ | 166 | * current packet */ |
167 | 167 | ||
168 | skb = alloc_skb(size, GFP_ATOMIC); | 168 | skb = alloc_skb(size, GFP_ATOMIC); |
@@ -203,7 +203,7 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
203 | size = NLMSG_SPACE(sizeof(*pm) + copy_len); | 203 | size = NLMSG_SPACE(sizeof(*pm) + copy_len); |
204 | 204 | ||
205 | ub = &ulog_buffers[groupnum]; | 205 | ub = &ulog_buffers[groupnum]; |
206 | 206 | ||
207 | spin_lock_bh(&ulog_lock); | 207 | spin_lock_bh(&ulog_lock); |
208 | 208 | ||
209 | if (!ub->skb) { | 209 | if (!ub->skb) { |
@@ -211,7 +211,7 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
211 | goto alloc_failure; | 211 | goto alloc_failure; |
212 | } else if (ub->qlen >= loginfo->qthreshold || | 212 | } else if (ub->qlen >= loginfo->qthreshold || |
213 | size > skb_tailroom(ub->skb)) { | 213 | size > skb_tailroom(ub->skb)) { |
214 | /* either the queue len is too high or we don't have | 214 | /* either the queue len is too high or we don't have |
215 | * enough room in nlskb left. send it to userspace. */ | 215 | * enough room in nlskb left. send it to userspace. */ |
216 | 216 | ||
217 | ulog_send(groupnum); | 217 | ulog_send(groupnum); |
@@ -220,11 +220,11 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
220 | goto alloc_failure; | 220 | goto alloc_failure; |
221 | } | 221 | } |
222 | 222 | ||
223 | DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen, | 223 | DEBUGP("ipt_ULOG: qlen %d, qthreshold %d\n", ub->qlen, |
224 | loginfo->qthreshold); | 224 | loginfo->qthreshold); |
225 | 225 | ||
226 | /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ | 226 | /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */ |
227 | nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, | 227 | nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, |
228 | sizeof(*pm)+copy_len); | 228 | sizeof(*pm)+copy_len); |
229 | ub->qlen++; | 229 | ub->qlen++; |
230 | 230 | ||
@@ -268,7 +268,7 @@ static void ipt_ulog_packet(unsigned int hooknum, | |||
268 | /* copy_len <= skb->len, so can't fail. */ | 268 | /* copy_len <= skb->len, so can't fail. */ |
269 | if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0) | 269 | if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0) |
270 | BUG(); | 270 | BUG(); |
271 | 271 | ||
272 | /* check if we are building multi-part messages */ | 272 | /* check if we are building multi-part messages */ |
273 | if (ub->qlen > 1) { | 273 | if (ub->qlen > 1) { |
274 | ub->lastnlh->nlmsg_flags |= NLM_F_MULTI; | 274 | ub->lastnlh->nlmsg_flags |= NLM_F_MULTI; |
@@ -312,10 +312,10 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb, | |||
312 | struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo; | 312 | struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo; |
313 | 313 | ||
314 | ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL); | 314 | ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL); |
315 | 315 | ||
316 | return XT_CONTINUE; | 316 | return XT_CONTINUE; |
317 | } | 317 | } |
318 | 318 | ||
319 | static void ipt_logfn(unsigned int pf, | 319 | static void ipt_logfn(unsigned int pf, |
320 | unsigned int hooknum, | 320 | unsigned int hooknum, |
321 | const struct sk_buff *skb, | 321 | const struct sk_buff *skb, |
@@ -396,7 +396,7 @@ static int __init ipt_ulog_init(void) | |||
396 | } | 396 | } |
397 | 397 | ||
398 | nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, | 398 | nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, |
399 | THIS_MODULE); | 399 | THIS_MODULE); |
400 | if (!nflognl) | 400 | if (!nflognl) |
401 | return -ENOMEM; | 401 | return -ENOMEM; |
402 | 402 | ||
@@ -407,7 +407,7 @@ static int __init ipt_ulog_init(void) | |||
407 | } | 407 | } |
408 | if (nflog) | 408 | if (nflog) |
409 | nf_log_register(PF_INET, &ipt_ulog_logger); | 409 | nf_log_register(PF_INET, &ipt_ulog_logger); |
410 | 410 | ||
411 | return 0; | 411 | return 0; |
412 | } | 412 | } |
413 | 413 | ||
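
ipt_ulog_packet() above batches packets per netlink multicast group and only calls ulog_send() when the batch reaches the rule's qthreshold or the next message would not fit in the buffer's remaining tailroom. That flush decision is compact enough to state on its own:

#include <stddef.h>
#include <stdio.h>

/* Mirrors the two flush conditions in ipt_ulog_packet(). */
static int need_flush(unsigned int qlen, unsigned int qthreshold,
		      size_t msg_size, size_t tailroom)
{
	return qlen >= qthreshold || msg_size > tailroom;
}

int main(void)
{
	printf("%d %d\n",
	       need_flush(9, 10, 128, 4096),	/* 0: keep batching */
	       need_flush(3, 10, 8192, 4096));	/* 1: message won't fit */
	return 0;
}
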
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c index 648f555c4d16..cfa0472617f6 100644 --- a/net/ipv4/netfilter/ipt_addrtype.c +++ b/net/ipv4/netfilter/ipt_addrtype.c | |||
@@ -40,7 +40,7 @@ static int match(const struct sk_buff *skb, | |||
40 | ret &= match_type(iph->saddr, info->source)^info->invert_source; | 40 | ret &= match_type(iph->saddr, info->source)^info->invert_source; |
41 | if (info->dest) | 41 | if (info->dest) |
42 | ret &= match_type(iph->daddr, info->dest)^info->invert_dest; | 42 | ret &= match_type(iph->daddr, info->dest)^info->invert_dest; |
43 | 43 | ||
44 | return ret; | 44 | return ret; |
45 | } | 45 | } |
46 | 46 | ||
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c index 42f41224a43a..18a16782cf40 100644 --- a/net/ipv4/netfilter/ipt_ah.c +++ b/net/ipv4/netfilter/ipt_ah.c | |||
@@ -29,8 +29,8 @@ static inline int | |||
29 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) | 29 | spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, int invert) |
30 | { | 30 | { |
31 | int r=0; | 31 | int r=0; |
32 | duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', | 32 | duprintf("ah spi_match:%c 0x%x <= 0x%x <= 0x%x",invert? '!':' ', |
33 | min,spi,max); | 33 | min,spi,max); |
34 | r=(spi >= min && spi <= max) ^ invert; | 34 | r=(spi >= min && spi <= max) ^ invert; |
35 | duprintf(" result %s\n",r? "PASS" : "FAILED"); | 35 | duprintf(" result %s\n",r? "PASS" : "FAILED"); |
36 | return r; | 36 | return r; |
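
spi_match() above is a plain inclusive range test whose result is flipped when the rule was written with '!'. The same XOR-with-invert idiom, reusable for any unsigned range match:

#include <stdint.h>
#include <stdio.h>

/* Inclusive range test with optional inversion, as in ipt_ah's spi_match(). */
static int range_match(uint32_t min, uint32_t max, uint32_t val, int invert)
{
	return ((val >= min && val <= max) ? 1 : 0) ^ (invert ? 1 : 0);
}

int main(void)
{
	printf("%d %d\n",
	       range_match(100, 200, 150, 0),	/* 1: in range */
	       range_match(100, 200, 150, 1));	/* 0: in range, but inverted */
	return 0;
}
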
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c index 05de593be94c..bc5d5e6091e4 100644 --- a/net/ipv4/netfilter/ipt_iprange.c +++ b/net/ipv4/netfilter/ipt_iprange.c | |||
@@ -41,7 +41,7 @@ match(const struct sk_buff *skb, | |||
41 | DEBUGP("src IP %u.%u.%u.%u NOT in range %s" | 41 | DEBUGP("src IP %u.%u.%u.%u NOT in range %s" |
42 | "%u.%u.%u.%u-%u.%u.%u.%u\n", | 42 | "%u.%u.%u.%u-%u.%u.%u.%u\n", |
43 | NIPQUAD(iph->saddr), | 43 | NIPQUAD(iph->saddr), |
44 | info->flags & IPRANGE_SRC_INV ? "(INV) " : "", | 44 | info->flags & IPRANGE_SRC_INV ? "(INV) " : "", |
45 | NIPQUAD(info->src.min_ip), | 45 | NIPQUAD(info->src.min_ip), |
46 | NIPQUAD(info->src.max_ip)); | 46 | NIPQUAD(info->src.max_ip)); |
47 | return 0; | 47 | return 0; |
@@ -54,7 +54,7 @@ match(const struct sk_buff *skb, | |||
54 | DEBUGP("dst IP %u.%u.%u.%u NOT in range %s" | 54 | DEBUGP("dst IP %u.%u.%u.%u NOT in range %s" |
55 | "%u.%u.%u.%u-%u.%u.%u.%u\n", | 55 | "%u.%u.%u.%u-%u.%u.%u.%u\n", |
56 | NIPQUAD(iph->daddr), | 56 | NIPQUAD(iph->daddr), |
57 | info->flags & IPRANGE_DST_INV ? "(INV) " : "", | 57 | info->flags & IPRANGE_DST_INV ? "(INV) " : "", |
58 | NIPQUAD(info->dst.min_ip), | 58 | NIPQUAD(info->dst.min_ip), |
59 | NIPQUAD(info->dst.max_ip)); | 59 | NIPQUAD(info->dst.max_ip)); |
60 | return 0; | 60 | return 0; |
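
The iprange match above compares addresses only after ntohl(), so that "min <= addr <= max" behaves numerically regardless of the host's byte order. A small userspace sketch of the same comparison:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

/* Compare in host byte order, as ipt_iprange does. */
static int in_range(uint32_t addr_be, uint32_t min_be, uint32_t max_be)
{
	uint32_t a = ntohl(addr_be), lo = ntohl(min_be), hi = ntohl(max_be);
	return a >= lo && a <= hi;
}

int main(void)
{
	struct in_addr a, lo, hi;

	inet_pton(AF_INET, "10.0.0.7", &a);
	inet_pton(AF_INET, "10.0.0.1", &lo);
	inet_pton(AF_INET, "10.0.0.127", &hi);
	printf("%d\n", in_range(a.s_addr, lo.s_addr, hi.s_addr));	/* 1 */
	return 0;
}
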
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c index 9f496ac834b5..7fae9aa8944c 100644 --- a/net/ipv4/netfilter/ipt_owner.c +++ b/net/ipv4/netfilter/ipt_owner.c | |||
@@ -53,10 +53,10 @@ match(const struct sk_buff *skb, | |||
53 | 53 | ||
54 | static int | 54 | static int |
55 | checkentry(const char *tablename, | 55 | checkentry(const char *tablename, |
56 | const void *ip, | 56 | const void *ip, |
57 | const struct xt_match *match, | 57 | const struct xt_match *match, |
58 | void *matchinfo, | 58 | void *matchinfo, |
59 | unsigned int hook_mask) | 59 | unsigned int hook_mask) |
60 | { | 60 | { |
61 | const struct ipt_owner_info *info = matchinfo; | 61 | const struct ipt_owner_info *info = matchinfo; |
62 | 62 | ||
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c index d5cd984e5ed2..1eca9f400374 100644 --- a/net/ipv4/netfilter/ipt_ttl.c +++ b/net/ipv4/netfilter/ipt_ttl.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* IP tables module for matching the value of the TTL | 1 | /* IP tables module for matching the value of the TTL |
2 | * | 2 | * |
3 | * ipt_ttl.c,v 1.5 2000/11/13 11:16:08 laforge Exp | 3 | * ipt_ttl.c,v 1.5 2000/11/13 11:16:08 laforge Exp |
4 | * | 4 | * |
@@ -41,7 +41,7 @@ static int match(const struct sk_buff *skb, | |||
41 | return (skb->nh.iph->ttl > info->ttl); | 41 | return (skb->nh.iph->ttl > info->ttl); |
42 | break; | 42 | break; |
43 | default: | 43 | default: |
44 | printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", | 44 | printk(KERN_WARNING "ipt_ttl: unknown mode %d\n", |
45 | info->mode); | 45 | info->mode); |
46 | return 0; | 46 | return 0; |
47 | } | 47 | } |
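
Only the greater-than arm and the default of the ipt_ttl match are visible in the hunk above; the match as a whole compares the packet's TTL against a configured value under one of several modes. A standalone sketch with illustrative mode names:

#include <stdio.h>

enum ttl_mode { TTL_EQ, TTL_NE, TTL_LT, TTL_GT };	/* assumed names */

static int ttl_match(unsigned char pkt_ttl, enum ttl_mode mode,
		     unsigned char ttl)
{
	switch (mode) {
	case TTL_EQ: return pkt_ttl == ttl;
	case TTL_NE: return pkt_ttl != ttl;
	case TTL_LT: return pkt_ttl < ttl;
	case TTL_GT: return pkt_ttl > ttl;
	default:
		fprintf(stderr, "unknown mode %d\n", mode);
		return 0;
	}
}

int main(void)
{
	printf("%d\n", ttl_match(64, TTL_GT, 32));	/* 1 */
	return 0;
}
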
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 51053cb42f43..d1d61e97b976 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c | |||
@@ -25,7 +25,7 @@ static struct | |||
25 | struct ipt_replace repl; | 25 | struct ipt_replace repl; |
26 | struct ipt_standard entries[3]; | 26 | struct ipt_standard entries[3]; |
27 | struct ipt_error term; | 27 | struct ipt_error term; |
28 | } initial_table __initdata | 28 | } initial_table __initdata |
29 | = { { "filter", FILTER_VALID_HOOKS, 4, | 29 | = { { "filter", FILTER_VALID_HOOKS, 4, |
30 | sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), | 30 | sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), |
31 | { [NF_IP_LOCAL_IN] = 0, | 31 | { [NF_IP_LOCAL_IN] = 0, |
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index a532e4d84332..98b66ef0c714 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c | |||
@@ -58,7 +58,7 @@ static struct | |||
58 | { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, | 58 | { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, |
59 | -NF_ACCEPT - 1 } }, | 59 | -NF_ACCEPT - 1 } }, |
60 | /* LOCAL_IN */ | 60 | /* LOCAL_IN */ |
61 | { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, | 61 | { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, |
62 | 0, | 62 | 0, |
63 | sizeof(struct ipt_entry), | 63 | sizeof(struct ipt_entry), |
64 | sizeof(struct ipt_standard), | 64 | sizeof(struct ipt_standard), |
@@ -66,7 +66,7 @@ static struct | |||
66 | { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, | 66 | { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, |
67 | -NF_ACCEPT - 1 } }, | 67 | -NF_ACCEPT - 1 } }, |
68 | /* FORWARD */ | 68 | /* FORWARD */ |
69 | { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, | 69 | { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, |
70 | 0, | 70 | 0, |
71 | sizeof(struct ipt_entry), | 71 | sizeof(struct ipt_entry), |
72 | sizeof(struct ipt_standard), | 72 | sizeof(struct ipt_standard), |
@@ -166,7 +166,7 @@ static struct nf_hook_ops ipt_ops[] = { | |||
166 | .hook = ipt_route_hook, | 166 | .hook = ipt_route_hook, |
167 | .owner = THIS_MODULE, | 167 | .owner = THIS_MODULE, |
168 | .pf = PF_INET, | 168 | .pf = PF_INET, |
169 | .hooknum = NF_IP_PRE_ROUTING, | 169 | .hooknum = NF_IP_PRE_ROUTING, |
170 | .priority = NF_IP_PRI_MANGLE, | 170 | .priority = NF_IP_PRI_MANGLE, |
171 | }, | 171 | }, |
172 | { | 172 | { |
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 5277550fa6b5..18c3d4c9ff51 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT . | 2 | * 'raw' table, which is the very first hooked in at PRE_ROUTING and LOCAL_OUT . |
3 | * | 3 | * |
4 | * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> | 4 | * Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> |
@@ -15,26 +15,26 @@ static struct | |||
15 | struct ipt_error term; | 15 | struct ipt_error term; |
16 | } initial_table __initdata = { | 16 | } initial_table __initdata = { |
17 | .repl = { | 17 | .repl = { |
18 | .name = "raw", | 18 | .name = "raw", |
19 | .valid_hooks = RAW_VALID_HOOKS, | 19 | .valid_hooks = RAW_VALID_HOOKS, |
20 | .num_entries = 3, | 20 | .num_entries = 3, |
21 | .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error), | 21 | .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error), |
22 | .hook_entry = { | 22 | .hook_entry = { |
23 | [NF_IP_PRE_ROUTING] = 0, | 23 | [NF_IP_PRE_ROUTING] = 0, |
24 | [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, | 24 | [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, |
25 | .underflow = { | 25 | .underflow = { |
26 | [NF_IP_PRE_ROUTING] = 0, | 26 | [NF_IP_PRE_ROUTING] = 0, |
27 | [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, | 27 | [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, |
28 | }, | 28 | }, |
29 | .entries = { | 29 | .entries = { |
30 | /* PRE_ROUTING */ | 30 | /* PRE_ROUTING */ |
31 | { | 31 | { |
32 | .entry = { | 32 | .entry = { |
33 | .target_offset = sizeof(struct ipt_entry), | 33 | .target_offset = sizeof(struct ipt_entry), |
34 | .next_offset = sizeof(struct ipt_standard), | 34 | .next_offset = sizeof(struct ipt_standard), |
35 | }, | 35 | }, |
36 | .target = { | 36 | .target = { |
37 | .target = { | 37 | .target = { |
38 | .u = { | 38 | .u = { |
39 | .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), | 39 | .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)), |
40 | }, | 40 | }, |
@@ -69,7 +69,7 @@ static struct | |||
69 | .target = { | 69 | .target = { |
70 | .u = { | 70 | .u = { |
71 | .user = { | 71 | .user = { |
72 | .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)), | 72 | .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)), |
73 | .name = IPT_ERROR_TARGET, | 73 | .name = IPT_ERROR_TARGET, |
74 | }, | 74 | }, |
75 | }, | 75 | }, |
@@ -80,9 +80,9 @@ static struct | |||
80 | }; | 80 | }; |
81 | 81 | ||
82 | static struct xt_table packet_raw = { | 82 | static struct xt_table packet_raw = { |
83 | .name = "raw", | 83 | .name = "raw", |
84 | .valid_hooks = RAW_VALID_HOOKS, | 84 | .valid_hooks = RAW_VALID_HOOKS, |
85 | .lock = RW_LOCK_UNLOCKED, | 85 | .lock = RW_LOCK_UNLOCKED, |
86 | .me = THIS_MODULE, | 86 | .me = THIS_MODULE, |
87 | .af = AF_INET, | 87 | .af = AF_INET, |
88 | }; | 88 | }; |
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 471b638cedec..b984db771258 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
@@ -66,7 +66,7 @@ static int ipv4_print_tuple(struct seq_file *s, | |||
66 | const struct nf_conntrack_tuple *tuple) | 66 | const struct nf_conntrack_tuple *tuple) |
67 | { | 67 | { |
68 | return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ", | 68 | return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ", |
69 | NIPQUAD(tuple->src.u3.ip), | 69 | NIPQUAD(tuple->src.u3.ip), |
70 | NIPQUAD(tuple->dst.u3.ip)); | 70 | NIPQUAD(tuple->dst.u3.ip)); |
71 | } | 71 | } |
72 | 72 | ||
@@ -82,14 +82,14 @@ nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) | |||
82 | { | 82 | { |
83 | skb_orphan(skb); | 83 | skb_orphan(skb); |
84 | 84 | ||
85 | local_bh_disable(); | 85 | local_bh_disable(); |
86 | skb = ip_defrag(skb, user); | 86 | skb = ip_defrag(skb, user); |
87 | local_bh_enable(); | 87 | local_bh_enable(); |
88 | 88 | ||
89 | if (skb) | 89 | if (skb) |
90 | ip_send_check(skb->nh.iph); | 90 | ip_send_check(skb->nh.iph); |
91 | 91 | ||
92 | return skb; | 92 | return skb; |
93 | } | 93 | } |
94 | 94 | ||
95 | static int | 95 | static int |
@@ -192,10 +192,10 @@ static unsigned int ipv4_conntrack_in(unsigned int hooknum, | |||
192 | } | 192 | } |
193 | 193 | ||
194 | static unsigned int ipv4_conntrack_local(unsigned int hooknum, | 194 | static unsigned int ipv4_conntrack_local(unsigned int hooknum, |
195 | struct sk_buff **pskb, | 195 | struct sk_buff **pskb, |
196 | const struct net_device *in, | 196 | const struct net_device *in, |
197 | const struct net_device *out, | 197 | const struct net_device *out, |
198 | int (*okfn)(struct sk_buff *)) | 198 | int (*okfn)(struct sk_buff *)) |
199 | { | 199 | { |
200 | /* root is playing with raw sockets. */ | 200 | /* root is playing with raw sockets. */ |
201 | if ((*pskb)->len < sizeof(struct iphdr) | 201 | if ((*pskb)->len < sizeof(struct iphdr) |
@@ -332,7 +332,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) | |||
332 | struct inet_sock *inet = inet_sk(sk); | 332 | struct inet_sock *inet = inet_sk(sk); |
333 | struct nf_conntrack_tuple_hash *h; | 333 | struct nf_conntrack_tuple_hash *h; |
334 | struct nf_conntrack_tuple tuple; | 334 | struct nf_conntrack_tuple tuple; |
335 | 335 | ||
336 | NF_CT_TUPLE_U_BLANK(&tuple); | 336 | NF_CT_TUPLE_U_BLANK(&tuple); |
337 | tuple.src.u3.ip = inet->rcv_saddr; | 337 | tuple.src.u3.ip = inet->rcv_saddr; |
338 | tuple.src.u.tcp.port = inet->sport; | 338 | tuple.src.u.tcp.port = inet->sport; |
@@ -501,7 +501,7 @@ static int __init nf_conntrack_l3proto_ipv4_init(void) | |||
501 | return ret; | 501 | return ret; |
502 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) | 502 | #if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) |
503 | cleanup_hooks: | 503 | cleanup_hooks: |
504 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); | 504 | nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops)); |
505 | #endif | 505 | #endif |
506 | cleanup_ipv4: | 506 | cleanup_ipv4: |
507 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); | 507 | nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4); |
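Editor's note: the cleanup_hooks / cleanup_ipv4 labels follow the usual kernel error-unwind idiom — each successful registration gets a matching unregister on the failure path, in reverse order. A hedged sketch of the pattern; the register_*/unregister_* names are illustrative stubs, not this file's symbols:

    /* Illustrative stubs standing in for the real registration calls. */
    static int register_a(void) { return 0; }
    static int register_b(void) { return 0; }
    static int register_c(void) { return 0; }
    static void unregister_a(void) { }
    static void unregister_b(void) { }

    static int example_init(void)
    {
        int ret;

        ret = register_a();             /* first resource */
        if (ret < 0)
            return ret;

        ret = register_b();             /* second resource */
        if (ret < 0)
            goto cleanup_a;

        ret = register_c();             /* third resource */
        if (ret < 0)
            goto cleanup_b;

        return 0;

    cleanup_b:
        unregister_b();                 /* undo in reverse order */
    cleanup_a:
        unregister_a();
        return ret;
    }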
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index 3b31bc649608..14a93a738418 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | |||
@@ -135,7 +135,7 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
135 | l3proto, l4proto)) | 135 | l3proto, l4proto)) |
136 | return -ENOSPC; | 136 | return -ENOSPC; |
137 | 137 | ||
138 | if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL])) | 138 | if (seq_print_counters(s, &ct->counters[IP_CT_DIR_ORIGINAL])) |
139 | return -ENOSPC; | 139 | return -ENOSPC; |
140 | 140 | ||
141 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) | 141 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) |
@@ -146,7 +146,7 @@ static int ct_seq_show(struct seq_file *s, void *v) | |||
146 | l3proto, l4proto)) | 146 | l3proto, l4proto)) |
147 | return -ENOSPC; | 147 | return -ENOSPC; |
148 | 148 | ||
149 | if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY])) | 149 | if (seq_print_counters(s, &ct->counters[IP_CT_DIR_REPLY])) |
150 | return -ENOSPC; | 150 | return -ENOSPC; |
151 | 151 | ||
152 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) | 152 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) |
@@ -228,7 +228,7 @@ static void *exp_seq_start(struct seq_file *s, loff_t *pos) | |||
228 | 228 | ||
229 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) | 229 | static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos) |
230 | { | 230 | { |
231 | struct list_head *e = v; | 231 | struct list_head *e = v; |
232 | 232 | ||
233 | ++*pos; | 233 | ++*pos; |
234 | e = e->next; | 234 | e = e->next; |
@@ -262,7 +262,7 @@ static int exp_seq_show(struct seq_file *s, void *v) | |||
262 | print_tuple(s, &exp->tuple, | 262 | print_tuple(s, &exp->tuple, |
263 | __nf_ct_l3proto_find(exp->tuple.src.l3num), | 263 | __nf_ct_l3proto_find(exp->tuple.src.l3num), |
264 | __nf_ct_l4proto_find(exp->tuple.src.l3num, | 264 | __nf_ct_l4proto_find(exp->tuple.src.l3num, |
265 | exp->tuple.dst.protonum)); | 265 | exp->tuple.dst.protonum)); |
266 | return seq_putc(s, '\n'); | 266 | return seq_putc(s, '\n'); |
267 | } | 267 | } |
268 | 268 | ||
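Editor's note: exp_seq_next() is the standard seq_file cursor step over a circular kernel list — bump *pos, advance one node, and return NULL once the walk comes back to the list head so the seq_file core stops. A userspace sketch of the same termination rule (the minimal list_head and long position type here are illustrative, not <linux/list.h>):

    struct list_head { struct list_head *next, *prev; };

    /* Sketch: advance a cursor over a circular list; NULL means "iteration
     * done", exactly as a seq_file ->next() handler reports it. */
    static void *list_seq_next(struct list_head *head, void *v, long *pos)
    {
        struct list_head *e = v;

        ++*pos;
        e = e->next;
        return (e == head) ? NULL : e;
    }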
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index db9e7c45d3b4..677b6c80c618 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c | |||
@@ -101,9 +101,9 @@ static int icmp_packet(struct nf_conn *ct, | |||
101 | unsigned int hooknum) | 101 | unsigned int hooknum) |
102 | { | 102 | { |
103 | /* Try to delete connection immediately after all replies: | 103 | /* Try to delete connection immediately after all replies: |
104 | won't actually vanish as we still have skb, and del_timer | 104 | won't actually vanish as we still have skb, and del_timer |
105 | means this will only run once even if count hits zero twice | 105 | means this will only run once even if count hits zero twice |
106 | (theoretically possible with SMP) */ | 106 | (theoretically possible with SMP) */ |
107 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { | 107 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) { |
108 | if (atomic_dec_and_test(&ct->proto.icmp.count) | 108 | if (atomic_dec_and_test(&ct->proto.icmp.count) |
109 | && del_timer(&ct->timeout)) | 109 | && del_timer(&ct->timeout)) |
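Editor's note: the comment above describes the "only one winner" idiom — the reply count may hit zero on two CPUs, but del_timer() returns true only for the caller that actually removed the pending timer, so the teardown runs once. A userspace analogue of that race resolution (C11 atomics standing in for atomic_dec_and_test() and del_timer(); not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
        atomic_int  replies;        /* outstanding replies                   */
        atomic_bool timer_pending;  /* stands in for the conntrack timeout   */
    };

    static void maybe_teardown(struct conn *c)
    {
        /* Both conditions must hold: we took the count to zero AND we were
         * the one who cancelled the timer — so the body runs exactly once. */
        if (atomic_fetch_sub(&c->replies, 1) == 1 &&
            atomic_exchange(&c->timer_pending, false) == true)
            printf("teardown runs exactly once\n");
    }

    int main(void)
    {
        struct conn c = { 1, true };
        maybe_teardown(&c);
        return 0;
    }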
@@ -144,8 +144,8 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; | |||
144 | /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ | 144 | /* Returns conntrack if it dealt with ICMP, and filled in skb fields */ |
145 | static int | 145 | static int |
146 | icmp_error_message(struct sk_buff *skb, | 146 | icmp_error_message(struct sk_buff *skb, |
147 | enum ip_conntrack_info *ctinfo, | 147 | enum ip_conntrack_info *ctinfo, |
148 | unsigned int hooknum) | 148 | unsigned int hooknum) |
149 | { | 149 | { |
150 | struct nf_conntrack_tuple innertuple, origtuple; | 150 | struct nf_conntrack_tuple innertuple, origtuple; |
151 | struct { | 151 | struct { |
@@ -181,9 +181,9 @@ icmp_error_message(struct sk_buff *skb, | |||
181 | return -NF_ACCEPT; | 181 | return -NF_ACCEPT; |
182 | } | 182 | } |
183 | 183 | ||
184 | /* Ordinarily, we'd expect the inverted tupleproto, but it's | 184 | /* Ordinarily, we'd expect the inverted tupleproto, but it's |
185 | been preserved inside the ICMP. */ | 185 | been preserved inside the ICMP. */ |
186 | if (!nf_ct_invert_tuple(&innertuple, &origtuple, | 186 | if (!nf_ct_invert_tuple(&innertuple, &origtuple, |
187 | &nf_conntrack_l3proto_ipv4, innerproto)) { | 187 | &nf_conntrack_l3proto_ipv4, innerproto)) { |
188 | DEBUGP("icmp_error_message: no match\n"); | 188 | DEBUGP("icmp_error_message: no match\n"); |
189 | return -NF_ACCEPT; | 189 | return -NF_ACCEPT; |
@@ -212,10 +212,10 @@ icmp_error_message(struct sk_buff *skb, | |||
212 | *ctinfo += IP_CT_IS_REPLY; | 212 | *ctinfo += IP_CT_IS_REPLY; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* Update skb to refer to this connection */ | 215 | /* Update skb to refer to this connection */ |
216 | skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; | 216 | skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; |
217 | skb->nfctinfo = *ctinfo; | 217 | skb->nfctinfo = *ctinfo; |
218 | return -NF_ACCEPT; | 218 | return -NF_ACCEPT; |
219 | } | 219 | } |
220 | 220 | ||
221 | /* Small and modified version of icmp_rcv */ | 221 | /* Small and modified version of icmp_rcv */ |
@@ -306,7 +306,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[], | |||
306 | if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) | 306 | if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) |
307 | return -EINVAL; | 307 | return -EINVAL; |
308 | 308 | ||
309 | tuple->dst.u.icmp.type = | 309 | tuple->dst.u.icmp.type = |
310 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); | 310 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); |
311 | tuple->dst.u.icmp.code = | 311 | tuple->dst.u.icmp.code = |
312 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); | 312 | *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); |
@@ -332,7 +332,7 @@ static struct ctl_table icmp_sysctl_table[] = { | |||
332 | .mode = 0644, | 332 | .mode = 0644, |
333 | .proc_handler = &proc_dointvec_jiffies, | 333 | .proc_handler = &proc_dointvec_jiffies, |
334 | }, | 334 | }, |
335 | { | 335 | { |
336 | .ctl_name = 0 | 336 | .ctl_name = 0 |
337 | } | 337 | } |
338 | }; | 338 | }; |
@@ -346,7 +346,7 @@ static struct ctl_table icmp_compat_sysctl_table[] = { | |||
346 | .mode = 0644, | 346 | .mode = 0644, |
347 | .proc_handler = &proc_dointvec_jiffies, | 347 | .proc_handler = &proc_dointvec_jiffies, |
348 | }, | 348 | }, |
349 | { | 349 | { |
350 | .ctl_name = 0 | 350 | .ctl_name = 0 |
351 | } | 351 | } |
352 | }; | 352 | }; |
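Editor's note: the `{ .ctl_name = 0 }` entries above are array terminators — the sysctl registration code walks the table until it hits the all-zero sentinel. A userspace sketch of the sentinel-terminated-table idiom (the struct fields and entry name below are illustrative, not the real ctl_table layout):

    #include <stdio.h>

    struct entry {
        int         ctl_name;   /* 0 marks the terminator */
        const char *procname;
    };

    static const struct entry table[] = {
        { 1, "nf_conntrack_icmp_timeout" },   /* illustrative entry        */
        { 0, NULL }                           /* like { .ctl_name = 0 }    */
    };

    int main(void)
    {
        const struct entry *e;

        for (e = table; e->ctl_name != 0; e++)   /* walk until sentinel */
            printf("%s\n", e->procname);
        return 0;
    }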
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 998b2557692c..cf1010827be1 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c | |||
@@ -452,8 +452,8 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
452 | (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); | 452 | (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); |
453 | 453 | ||
454 | /* Redirects on non-null nats must be dropped, else they'll | 454 | /* Redirects on non-null nats must be dropped, else they'll |
455 | start talking to each other without our translation, and be | 455 | start talking to each other without our translation, and be |
456 | confused... --RR */ | 456 | confused... --RR */ |
457 | if (inside->icmp.type == ICMP_REDIRECT) { | 457 | if (inside->icmp.type == ICMP_REDIRECT) { |
458 | /* If NAT isn't finished, assume it and drop. */ | 458 | /* If NAT isn't finished, assume it and drop. */ |
459 | if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) | 459 | if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) |
@@ -469,13 +469,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct, | |||
469 | if (!nf_ct_get_tuple(*pskb, | 469 | if (!nf_ct_get_tuple(*pskb, |
470 | (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr), | 470 | (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr), |
471 | (*pskb)->nh.iph->ihl*4 + | 471 | (*pskb)->nh.iph->ihl*4 + |
472 | sizeof(struct icmphdr) + inside->ip.ihl*4, | 472 | sizeof(struct icmphdr) + inside->ip.ihl*4, |
473 | (u_int16_t)AF_INET, | 473 | (u_int16_t)AF_INET, |
474 | inside->ip.protocol, | 474 | inside->ip.protocol, |
475 | &inner, | 475 | &inner, |
476 | l3proto, | 476 | l3proto, |
477 | __nf_ct_l4proto_find((u_int16_t)PF_INET, | 477 | __nf_ct_l4proto_find((u_int16_t)PF_INET, |
478 | inside->ip.protocol))) | 478 | inside->ip.protocol))) |
479 | return 0; | 479 | return 0; |
480 | 480 | ||
481 | /* Change inner back to look like incoming packet. We do the | 481 | /* Change inner back to look like incoming packet. We do the |
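Editor's note: the nf_ct_get_tuple() call above parses the packet embedded in an ICMP error: the outer IP header (ihl*4 bytes) is followed by the 8-byte ICMP header, then the original IP header, then its transport header. A small arithmetic sketch of those offsets (header lengths chosen for illustration):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        unsigned outer_ihl = 5;          /* outer IP header length, 32-bit words */
        unsigned inner_ihl = 5;          /* embedded IP header length, words     */
        size_t   icmp_hdr  = 8;          /* sizeof(struct icmphdr)               */

        size_t inner_ip = outer_ihl * 4 + icmp_hdr;       /* = 28 */
        size_t inner_l4 = inner_ip + inner_ihl * 4;       /* = 48 */

        printf("embedded IP header at %zu, embedded L4 header at %zu\n",
               inner_ip, inner_l4);
        return 0;
    }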
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index fb9ab0114c23..9cbf3f9be13b 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c | |||
@@ -256,7 +256,7 @@ static int nat_rtp_rtcp(struct sk_buff **pskb, struct nf_conn *ct, | |||
256 | if (set_h245_addr(pskb, data, dataoff, taddr, | 256 | if (set_h245_addr(pskb, data, dataoff, taddr, |
257 | &ct->tuplehash[!dir].tuple.dst.u3, | 257 | &ct->tuplehash[!dir].tuple.dst.u3, |
258 | htons((port & htons(1)) ? nated_port + 1 : | 258 | htons((port & htons(1)) ? nated_port + 1 : |
259 | nated_port)) == 0) { | 259 | nated_port)) == 0) { |
260 | /* Save ports */ | 260 | /* Save ports */ |
261 | info->rtp_port[i][dir] = rtp_port; | 261 | info->rtp_port[i][dir] = rtp_port; |
262 | info->rtp_port[i][!dir] = htons(nated_port); | 262 | info->rtp_port[i][!dir] = htons(nated_port); |
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index dc6738bdfab7..49a90c39ffce 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -179,7 +179,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb, | |||
179 | tcph->check = tcp_v4_check(datalen, | 179 | tcph->check = tcp_v4_check(datalen, |
180 | iph->saddr, iph->daddr, | 180 | iph->saddr, iph->daddr, |
181 | csum_partial((char *)tcph, | 181 | csum_partial((char *)tcph, |
182 | datalen, 0)); | 182 | datalen, 0)); |
183 | } else | 183 | } else |
184 | nf_proto_csum_replace2(&tcph->check, *pskb, | 184 | nf_proto_csum_replace2(&tcph->check, *pskb, |
185 | htons(oldlen), htons(datalen), 1); | 185 | htons(oldlen), htons(datalen), 1); |
@@ -223,7 +223,7 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
223 | /* UDP helpers might accidentally mangle the wrong packet */ | 223 | /* UDP helpers might accidentally mangle the wrong packet */ |
224 | iph = (*pskb)->nh.iph; | 224 | iph = (*pskb)->nh.iph; |
225 | if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + | 225 | if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) + |
226 | match_offset + match_len) | 226 | match_offset + match_len) |
227 | return 0; | 227 | return 0; |
228 | 228 | ||
229 | if (!skb_make_writable(pskb, (*pskb)->len)) | 229 | if (!skb_make_writable(pskb, (*pskb)->len)) |
@@ -252,9 +252,9 @@ nf_nat_mangle_udp_packet(struct sk_buff **pskb, | |||
252 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { | 252 | if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) { |
253 | udph->check = 0; | 253 | udph->check = 0; |
254 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, | 254 | udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, |
255 | datalen, IPPROTO_UDP, | 255 | datalen, IPPROTO_UDP, |
256 | csum_partial((char *)udph, | 256 | csum_partial((char *)udph, |
257 | datalen, 0)); | 257 | datalen, 0)); |
258 | if (!udph->check) | 258 | if (!udph->check) |
259 | udph->check = CSUM_MANGLED_0; | 259 | udph->check = CSUM_MANGLED_0; |
260 | } else | 260 | } else |
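Editor's note: after rewriting the payload, the helper recomputes the UDP checksum from scratch and substitutes CSUM_MANGLED_0 (all-ones) for a computed value of zero, because in UDP over IPv4 a transmitted checksum of zero means "no checksum". A standalone sketch of that final fold (the RFC 768 rule, not the kernel's csum_fold()):

    #include <stdint.h>

    /* Sketch: fold a 32-bit one's-complement accumulator into the 16-bit UDP
     * checksum field, mapping a genuine zero result to 0xffff. */
    static uint16_t udp_csum_finish(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        sum = (uint16_t)~sum;
        return sum ? (uint16_t)sum : 0xffff;  /* 0 is reserved for "no checksum" */
    }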
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c index 5df4fcae3ab6..7ba341c22eaa 100644 --- a/net/ipv4/netfilter/nf_nat_pptp.c +++ b/net/ipv4/netfilter/nf_nat_pptp.c | |||
@@ -184,10 +184,10 @@ pptp_outbound_pkt(struct sk_buff **pskb, | |||
184 | 184 | ||
185 | /* mangle packet */ | 185 | /* mangle packet */ |
186 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 186 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, |
187 | cid_off + sizeof(struct pptp_pkt_hdr) + | 187 | cid_off + sizeof(struct pptp_pkt_hdr) + |
188 | sizeof(struct PptpControlHeader), | 188 | sizeof(struct PptpControlHeader), |
189 | sizeof(new_callid), (char *)&new_callid, | 189 | sizeof(new_callid), (char *)&new_callid, |
190 | sizeof(new_callid)) == 0) | 190 | sizeof(new_callid)) == 0) |
191 | return NF_DROP; | 191 | return NF_DROP; |
192 | return NF_ACCEPT; | 192 | return NF_ACCEPT; |
193 | } | 193 | } |
@@ -276,7 +276,7 @@ pptp_inbound_pkt(struct sk_buff **pskb, | |||
276 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); | 276 | ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); |
277 | 277 | ||
278 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, | 278 | if (nf_nat_mangle_tcp_packet(pskb, ct, ctinfo, |
279 | pcid_off + sizeof(struct pptp_pkt_hdr) + | 279 | pcid_off + sizeof(struct pptp_pkt_hdr) + |
280 | sizeof(struct PptpControlHeader), | 280 | sizeof(struct PptpControlHeader), |
281 | sizeof(new_pcid), (char *)&new_pcid, | 281 | sizeof(new_pcid), (char *)&new_pcid, |
282 | sizeof(new_pcid)) == 0) | 282 | sizeof(new_pcid)) == 0) |
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c index dcfd772972d7..6bc2f06de055 100644 --- a/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c | |||
@@ -44,7 +44,7 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple, | |||
44 | 44 | ||
45 | for (i = 0; i < range_size; i++, id++) { | 45 | for (i = 0; i < range_size; i++, id++) { |
46 | tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + | 46 | tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + |
47 | (id % range_size)); | 47 | (id % range_size)); |
48 | if (!nf_nat_used_tuple(tuple, ct)) | 48 | if (!nf_nat_used_tuple(tuple, ct)) |
49 | return 1; | 49 | return 1; |
50 | } | 50 | } |
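Editor's note: icmp_unique_tuple() walks the configured id range round-robin, starting from the current id, and takes the first candidate that nf_nat_used_tuple() reports as free. A userspace sketch of the same selection loop (is_used() is an illustrative stand-in for nf_nat_used_tuple()):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for nf_nat_used_tuple(). */
    static bool is_used(uint16_t id) { return id == 0; }

    /* Sketch: pick an unused ICMP echo id from [min, min + range_size). */
    static int pick_icmp_id(uint16_t min, uint16_t range_size,
                            uint16_t start, uint16_t *chosen)
    {
        uint16_t i, id;

        for (i = 0, id = start; i < range_size; i++, id++) {
            uint16_t candidate = min + (id % range_size);
            if (!is_used(candidate)) {
                *chosen = candidate;
                return 1;               /* found a free id */
            }
        }
        return 0;                       /* range exhausted */
    }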
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c index 7f95b4e2eb31..147a4370cf03 100644 --- a/net/ipv4/netfilter/nf_nat_rule.c +++ b/net/ipv4/netfilter/nf_nat_rule.c | |||
@@ -56,8 +56,8 @@ static struct | |||
56 | /* PRE_ROUTING */ | 56 | /* PRE_ROUTING */ |
57 | { | 57 | { |
58 | .entry = { | 58 | .entry = { |
59 | .target_offset = sizeof(struct ipt_entry), | 59 | .target_offset = sizeof(struct ipt_entry), |
60 | .next_offset = sizeof(struct ipt_standard), | 60 | .next_offset = sizeof(struct ipt_standard), |
61 | }, | 61 | }, |
62 | .target = { | 62 | .target = { |
63 | .target = { | 63 | .target = { |
@@ -71,8 +71,8 @@ static struct | |||
71 | /* POST_ROUTING */ | 71 | /* POST_ROUTING */ |
72 | { | 72 | { |
73 | .entry = { | 73 | .entry = { |
74 | .target_offset = sizeof(struct ipt_entry), | 74 | .target_offset = sizeof(struct ipt_entry), |
75 | .next_offset = sizeof(struct ipt_standard), | 75 | .next_offset = sizeof(struct ipt_standard), |
76 | }, | 76 | }, |
77 | .target = { | 77 | .target = { |
78 | .target = { | 78 | .target = { |
@@ -86,8 +86,8 @@ static struct | |||
86 | /* LOCAL_OUT */ | 86 | /* LOCAL_OUT */ |
87 | { | 87 | { |
88 | .entry = { | 88 | .entry = { |
89 | .target_offset = sizeof(struct ipt_entry), | 89 | .target_offset = sizeof(struct ipt_entry), |
90 | .next_offset = sizeof(struct ipt_standard), | 90 | .next_offset = sizeof(struct ipt_standard), |
91 | }, | 91 | }, |
92 | .target = { | 92 | .target = { |
93 | .target = { | 93 | .target = { |
@@ -145,7 +145,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb, | |||
145 | 145 | ||
146 | /* Connection must be valid and new. */ | 146 | /* Connection must be valid and new. */ |
147 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || | 147 | NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || |
148 | ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); | 148 | ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); |
149 | NF_CT_ASSERT(out); | 149 | NF_CT_ASSERT(out); |
150 | 150 | ||
151 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); | 151 | return nf_nat_setup_info(ct, &mr->range[0], hooknum); |
@@ -256,8 +256,8 @@ alloc_null_binding(struct nf_conn *ct, | |||
256 | 256 | ||
257 | unsigned int | 257 | unsigned int |
258 | alloc_null_binding_confirmed(struct nf_conn *ct, | 258 | alloc_null_binding_confirmed(struct nf_conn *ct, |
259 | struct nf_nat_info *info, | 259 | struct nf_nat_info *info, |
260 | unsigned int hooknum) | 260 | unsigned int hooknum) |
261 | { | 261 | { |
262 | __be32 ip | 262 | __be32 ip |
263 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC | 263 | = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC |
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index 3d524b957310..b12cd7c314ca 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c | |||
@@ -90,7 +90,7 @@ static int map_sip_addr(struct sk_buff **pskb, enum ip_conntrack_info ctinfo, | |||
90 | return 1; | 90 | return 1; |
91 | 91 | ||
92 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 92 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, |
93 | matchoff, matchlen, addr, addrlen)) | 93 | matchoff, matchlen, addr, addrlen)) |
94 | return 0; | 94 | return 0; |
95 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); | 95 | *dptr = (*pskb)->data + (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); |
96 | return 1; | 96 | return 1; |
@@ -151,7 +151,7 @@ static unsigned int mangle_sip_packet(struct sk_buff **pskb, | |||
151 | return 0; | 151 | return 0; |
152 | 152 | ||
153 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 153 | if (!nf_nat_mangle_udp_packet(pskb, ct, ctinfo, |
154 | matchoff, matchlen, buffer, bufflen)) | 154 | matchoff, matchlen, buffer, bufflen)) |
155 | return 0; | 155 | return 0; |
156 | 156 | ||
157 | /* We need to reload this. Thanks Patrick. */ | 157 | /* We need to reload this. Thanks Patrick. */ |
@@ -172,7 +172,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
172 | 172 | ||
173 | /* Get actual SDP length */ | 173 | /* Get actual SDP length */ |
174 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, | 174 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, |
175 | &matchlen, POS_SDP_HEADER) > 0) { | 175 | &matchlen, POS_SDP_HEADER) > 0) { |
176 | 176 | ||
177 | /* since ct_sip_get_info() gives us a pointer past 'v=', | 177 | /* since ct_sip_get_info() gives us a pointer past 'v=', |
178 | we need to add 2 bytes to this count. */ | 178 | we need to add 2 bytes to this count. */ |
@@ -180,7 +180,7 @@ static int mangle_content_len(struct sk_buff **pskb, | |||
180 | 180 | ||
181 | /* Now, update SDP length */ | 181 | /* Now, update SDP length */ |
182 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, | 182 | if (ct_sip_get_info(ct, dptr, (*pskb)->len - dataoff, &matchoff, |
183 | &matchlen, POS_CONTENT) > 0) { | 183 | &matchlen, POS_CONTENT) > 0) { |
184 | 184 | ||
185 | bufflen = sprintf(buffer, "%u", c_len); | 185 | bufflen = sprintf(buffer, "%u", c_len); |
186 | return nf_nat_mangle_udp_packet(pskb, ct, ctinfo, | 186 | return nf_nat_mangle_udp_packet(pskb, ct, ctinfo, |
@@ -205,17 +205,17 @@ static unsigned int mangle_sdp(struct sk_buff **pskb, | |||
205 | /* Mangle owner and contact info. */ | 205 | /* Mangle owner and contact info. */ |
206 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); | 206 | bufflen = sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(newip)); |
207 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 207 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
208 | buffer, bufflen, POS_OWNER_IP4)) | 208 | buffer, bufflen, POS_OWNER_IP4)) |
209 | return 0; | 209 | return 0; |
210 | 210 | ||
211 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 211 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
212 | buffer, bufflen, POS_CONNECTION_IP4)) | 212 | buffer, bufflen, POS_CONNECTION_IP4)) |
213 | return 0; | 213 | return 0; |
214 | 214 | ||
215 | /* Mangle media port. */ | 215 | /* Mangle media port. */ |
216 | bufflen = sprintf(buffer, "%u", port); | 216 | bufflen = sprintf(buffer, "%u", port); |
217 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, | 217 | if (!mangle_sip_packet(pskb, ctinfo, ct, &dptr, (*pskb)->len - dataoff, |
218 | buffer, bufflen, POS_MEDIA)) | 218 | buffer, bufflen, POS_MEDIA)) |
219 | return 0; | 219 | return 0; |
220 | 220 | ||
221 | return mangle_content_len(pskb, ctinfo, ct, dptr); | 221 | return mangle_content_len(pskb, ctinfo, ct, dptr); |
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index f12528fe1bf9..ce5c4939a6ee 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c | |||
@@ -150,8 +150,8 @@ struct asn1_octstr | |||
150 | }; | 150 | }; |
151 | 151 | ||
152 | static void asn1_open(struct asn1_ctx *ctx, | 152 | static void asn1_open(struct asn1_ctx *ctx, |
153 | unsigned char *buf, | 153 | unsigned char *buf, |
154 | unsigned int len) | 154 | unsigned int len) |
155 | { | 155 | { |
156 | ctx->begin = buf; | 156 | ctx->begin = buf; |
157 | ctx->end = buf + len; | 157 | ctx->end = buf + len; |
@@ -186,9 +186,9 @@ static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) | |||
186 | } | 186 | } |
187 | 187 | ||
188 | static unsigned char asn1_id_decode(struct asn1_ctx *ctx, | 188 | static unsigned char asn1_id_decode(struct asn1_ctx *ctx, |
189 | unsigned int *cls, | 189 | unsigned int *cls, |
190 | unsigned int *con, | 190 | unsigned int *con, |
191 | unsigned int *tag) | 191 | unsigned int *tag) |
192 | { | 192 | { |
193 | unsigned char ch; | 193 | unsigned char ch; |
194 | 194 | ||
@@ -207,8 +207,8 @@ static unsigned char asn1_id_decode(struct asn1_ctx *ctx, | |||
207 | } | 207 | } |
208 | 208 | ||
209 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, | 209 | static unsigned char asn1_length_decode(struct asn1_ctx *ctx, |
210 | unsigned int *def, | 210 | unsigned int *def, |
211 | unsigned int *len) | 211 | unsigned int *len) |
212 | { | 212 | { |
213 | unsigned char ch, cnt; | 213 | unsigned char ch, cnt; |
214 | 214 | ||
@@ -239,10 +239,10 @@ static unsigned char asn1_length_decode(struct asn1_ctx *ctx, | |||
239 | } | 239 | } |
240 | 240 | ||
241 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, | 241 | static unsigned char asn1_header_decode(struct asn1_ctx *ctx, |
242 | unsigned char **eoc, | 242 | unsigned char **eoc, |
243 | unsigned int *cls, | 243 | unsigned int *cls, |
244 | unsigned int *con, | 244 | unsigned int *con, |
245 | unsigned int *tag) | 245 | unsigned int *tag) |
246 | { | 246 | { |
247 | unsigned int def, len; | 247 | unsigned int def, len; |
248 | 248 | ||
@@ -297,8 +297,8 @@ static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc) | |||
297 | } | 297 | } |
298 | 298 | ||
299 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, | 299 | static unsigned char asn1_long_decode(struct asn1_ctx *ctx, |
300 | unsigned char *eoc, | 300 | unsigned char *eoc, |
301 | long *integer) | 301 | long *integer) |
302 | { | 302 | { |
303 | unsigned char ch; | 303 | unsigned char ch; |
304 | unsigned int len; | 304 | unsigned int len; |
@@ -325,8 +325,8 @@ static unsigned char asn1_long_decode(struct asn1_ctx *ctx, | |||
325 | } | 325 | } |
326 | 326 | ||
327 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, | 327 | static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, |
328 | unsigned char *eoc, | 328 | unsigned char *eoc, |
329 | unsigned int *integer) | 329 | unsigned int *integer) |
330 | { | 330 | { |
331 | unsigned char ch; | 331 | unsigned char ch; |
332 | unsigned int len; | 332 | unsigned int len; |
@@ -354,8 +354,8 @@ static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, | |||
354 | } | 354 | } |
355 | 355 | ||
356 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, | 356 | static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, |
357 | unsigned char *eoc, | 357 | unsigned char *eoc, |
358 | unsigned long *integer) | 358 | unsigned long *integer) |
359 | { | 359 | { |
360 | unsigned char ch; | 360 | unsigned char ch; |
361 | unsigned int len; | 361 | unsigned int len; |
@@ -383,9 +383,9 @@ static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, | |||
383 | } | 383 | } |
384 | 384 | ||
385 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | 385 | static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, |
386 | unsigned char *eoc, | 386 | unsigned char *eoc, |
387 | unsigned char **octets, | 387 | unsigned char **octets, |
388 | unsigned int *len) | 388 | unsigned int *len) |
389 | { | 389 | { |
390 | unsigned char *ptr; | 390 | unsigned char *ptr; |
391 | 391 | ||
@@ -411,7 +411,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, | |||
411 | } | 411 | } |
412 | 412 | ||
413 | static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, | 413 | static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, |
414 | unsigned long *subid) | 414 | unsigned long *subid) |
415 | { | 415 | { |
416 | unsigned char ch; | 416 | unsigned char ch; |
417 | 417 | ||
@@ -428,9 +428,9 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, | |||
428 | } | 428 | } |
429 | 429 | ||
430 | static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, | 430 | static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, |
431 | unsigned char *eoc, | 431 | unsigned char *eoc, |
432 | unsigned long **oid, | 432 | unsigned long **oid, |
433 | unsigned int *len) | 433 | unsigned int *len) |
434 | { | 434 | { |
435 | unsigned long subid; | 435 | unsigned long subid; |
436 | unsigned int size; | 436 | unsigned int size; |
@@ -611,9 +611,9 @@ struct snmp_v1_trap | |||
611 | #define SERR_EOM 2 | 611 | #define SERR_EOM 2 |
612 | 612 | ||
613 | static inline void mangle_address(unsigned char *begin, | 613 | static inline void mangle_address(unsigned char *begin, |
614 | unsigned char *addr, | 614 | unsigned char *addr, |
615 | const struct oct1_map *map, | 615 | const struct oct1_map *map, |
616 | __sum16 *check); | 616 | __sum16 *check); |
617 | struct snmp_cnv | 617 | struct snmp_cnv |
618 | { | 618 | { |
619 | unsigned int class; | 619 | unsigned int class; |
@@ -644,8 +644,8 @@ static struct snmp_cnv snmp_conv [] = | |||
644 | }; | 644 | }; |
645 | 645 | ||
646 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, | 646 | static unsigned char snmp_tag_cls2syntax(unsigned int tag, |
647 | unsigned int cls, | 647 | unsigned int cls, |
648 | unsigned short *syntax) | 648 | unsigned short *syntax) |
649 | { | 649 | { |
650 | struct snmp_cnv *cnv; | 650 | struct snmp_cnv *cnv; |
651 | 651 | ||
@@ -662,7 +662,7 @@ static unsigned char snmp_tag_cls2syntax(unsigned int tag, | |||
662 | } | 662 | } |
663 | 663 | ||
664 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | 664 | static unsigned char snmp_object_decode(struct asn1_ctx *ctx, |
665 | struct snmp_object **obj) | 665 | struct snmp_object **obj) |
666 | { | 666 | { |
667 | unsigned int cls, con, tag, len, idlen; | 667 | unsigned int cls, con, tag, len, idlen; |
668 | unsigned short type; | 668 | unsigned short type; |
@@ -714,7 +714,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
714 | return 0; | 714 | return 0; |
715 | } | 715 | } |
716 | *obj = kmalloc(sizeof(struct snmp_object) + len, | 716 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
717 | GFP_ATOMIC); | 717 | GFP_ATOMIC); |
718 | if (*obj == NULL) { | 718 | if (*obj == NULL) { |
719 | kfree(id); | 719 | kfree(id); |
720 | if (net_ratelimit()) | 720 | if (net_ratelimit()) |
@@ -730,7 +730,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
730 | return 0; | 730 | return 0; |
731 | } | 731 | } |
732 | *obj = kmalloc(sizeof(struct snmp_object) + len, | 732 | *obj = kmalloc(sizeof(struct snmp_object) + len, |
733 | GFP_ATOMIC); | 733 | GFP_ATOMIC); |
734 | if (*obj == NULL) { | 734 | if (*obj == NULL) { |
735 | kfree(id); | 735 | kfree(id); |
736 | if (net_ratelimit()) | 736 | if (net_ratelimit()) |
@@ -834,7 +834,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx, | |||
834 | } | 834 | } |
835 | 835 | ||
836 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, | 836 | static unsigned char snmp_request_decode(struct asn1_ctx *ctx, |
837 | struct snmp_request *request) | 837 | struct snmp_request *request) |
838 | { | 838 | { |
839 | unsigned int cls, con, tag; | 839 | unsigned int cls, con, tag; |
840 | unsigned char *end; | 840 | unsigned char *end; |
@@ -874,9 +874,9 @@ static unsigned char snmp_request_decode(struct asn1_ctx *ctx, | |||
874 | * code example in the draft. | 874 | * code example in the draft. |
875 | */ | 875 | */ |
876 | static void fast_csum(__sum16 *csum, | 876 | static void fast_csum(__sum16 *csum, |
877 | const unsigned char *optr, | 877 | const unsigned char *optr, |
878 | const unsigned char *nptr, | 878 | const unsigned char *nptr, |
879 | int offset) | 879 | int offset) |
880 | { | 880 | { |
881 | unsigned char s[4]; | 881 | unsigned char s[4]; |
882 | 882 | ||
@@ -899,9 +899,9 @@ static void fast_csum(__sum16 *csum, | |||
899 | * - addr points to the start of the address | 899 | * - addr points to the start of the address |
900 | */ | 900 | */ |
901 | static inline void mangle_address(unsigned char *begin, | 901 | static inline void mangle_address(unsigned char *begin, |
902 | unsigned char *addr, | 902 | unsigned char *addr, |
903 | const struct oct1_map *map, | 903 | const struct oct1_map *map, |
904 | __sum16 *check) | 904 | __sum16 *check) |
905 | { | 905 | { |
906 | if (map->from == NOCT1(addr)) { | 906 | if (map->from == NOCT1(addr)) { |
907 | u_int32_t old; | 907 | u_int32_t old; |
@@ -914,7 +914,7 @@ static inline void mangle_address(unsigned char *begin, | |||
914 | /* Update UDP checksum if being used */ | 914 | /* Update UDP checksum if being used */ |
915 | if (*check) { | 915 | if (*check) { |
916 | fast_csum(check, | 916 | fast_csum(check, |
917 | &map->from, &map->to, addr - begin); | 917 | &map->from, &map->to, addr - begin); |
918 | 918 | ||
919 | } | 919 | } |
920 | 920 | ||
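Editor's note: mangle_address() rewrites a single address octet inside the SNMP payload and then patches the UDP checksum in place via fast_csum() rather than recomputing it over the whole datagram. fast_csum() handles the single-byte, odd/even-offset case; the sketch below shows the simpler aligned 16-bit form of the same RFC 1624 incremental update (not the kernel's implementation):

    #include <stdint.h>

    /* Sketch: RFC 1624 update — HC' = ~(~HC + ~m + m') when a covered 16-bit
     * word changes from old_word to new_word. */
    static uint16_t csum_update16(uint16_t check, uint16_t old_word,
                                  uint16_t new_word)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old_word;
        sum += new_word;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }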
@@ -925,9 +925,9 @@ static inline void mangle_address(unsigned char *begin, | |||
925 | } | 925 | } |
926 | 926 | ||
927 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, | 927 | static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, |
928 | struct snmp_v1_trap *trap, | 928 | struct snmp_v1_trap *trap, |
929 | const struct oct1_map *map, | 929 | const struct oct1_map *map, |
930 | __sum16 *check) | 930 | __sum16 *check) |
931 | { | 931 | { |
932 | unsigned int cls, con, tag, len; | 932 | unsigned int cls, con, tag, len; |
933 | unsigned char *end; | 933 | unsigned char *end; |
@@ -1019,9 +1019,9 @@ static void hex_dump(unsigned char *buf, size_t len) | |||
1019 | * (And this is the fucking 'basic' method). | 1019 | * (And this is the fucking 'basic' method). |
1020 | */ | 1020 | */ |
1021 | static int snmp_parse_mangle(unsigned char *msg, | 1021 | static int snmp_parse_mangle(unsigned char *msg, |
1022 | u_int16_t len, | 1022 | u_int16_t len, |
1023 | const struct oct1_map *map, | 1023 | const struct oct1_map *map, |
1024 | __sum16 *check) | 1024 | __sum16 *check) |
1025 | { | 1025 | { |
1026 | unsigned char *eoc, *end; | 1026 | unsigned char *eoc, *end; |
1027 | unsigned int cls, con, tag, vers, pdutype; | 1027 | unsigned int cls, con, tag, vers, pdutype; |
@@ -1191,8 +1191,8 @@ static int snmp_parse_mangle(unsigned char *msg, | |||
1191 | * SNMP translation routine. | 1191 | * SNMP translation routine. |
1192 | */ | 1192 | */ |
1193 | static int snmp_translate(struct nf_conn *ct, | 1193 | static int snmp_translate(struct nf_conn *ct, |
1194 | enum ip_conntrack_info ctinfo, | 1194 | enum ip_conntrack_info ctinfo, |
1195 | struct sk_buff **pskb) | 1195 | struct sk_buff **pskb) |
1196 | { | 1196 | { |
1197 | struct iphdr *iph = (*pskb)->nh.iph; | 1197 | struct iphdr *iph = (*pskb)->nh.iph; |
1198 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); | 1198 | struct udphdr *udph = (struct udphdr *)((__be32 *)iph + iph->ihl); |
@@ -1219,7 +1219,7 @@ static int snmp_translate(struct nf_conn *ct, | |||
1219 | return NF_ACCEPT; | 1219 | return NF_ACCEPT; |
1220 | 1220 | ||
1221 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), | 1221 | if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr), |
1222 | paylen, &map, &udph->check)) { | 1222 | paylen, &map, &udph->check)) { |
1223 | if (net_ratelimit()) | 1223 | if (net_ratelimit()) |
1224 | printk(KERN_WARNING "bsalg: parser failed\n"); | 1224 | printk(KERN_WARNING "bsalg: parser failed\n"); |
1225 | return NF_DROP; | 1225 | return NF_DROP; |
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index 5a964a167c13..e4d3ef17d45b 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -96,8 +96,8 @@ nf_nat_fn(unsigned int hooknum, | |||
96 | protocol. 8) --RR */ | 96 | protocol. 8) --RR */ |
97 | if (!ct) { | 97 | if (!ct) { |
98 | /* Exception: ICMP redirect to new connection (not in | 98 | /* Exception: ICMP redirect to new connection (not in |
99 | hash table yet). We must not let this through, in | 99 | hash table yet). We must not let this through, in |
100 | case we're doing NAT to the same network. */ | 100 | case we're doing NAT to the same network. */ |
101 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { | 101 | if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { |
102 | struct icmphdr _hdr, *hp; | 102 | struct icmphdr _hdr, *hp; |
103 | 103 | ||
@@ -141,7 +141,7 @@ nf_nat_fn(unsigned int hooknum, | |||
141 | if (unlikely(nf_ct_is_confirmed(ct))) | 141 | if (unlikely(nf_ct_is_confirmed(ct))) |
142 | /* NAT module was loaded late */ | 142 | /* NAT module was loaded late */ |
143 | ret = alloc_null_binding_confirmed(ct, info, | 143 | ret = alloc_null_binding_confirmed(ct, info, |
144 | hooknum); | 144 | hooknum); |
145 | else if (hooknum == NF_IP_LOCAL_IN) | 145 | else if (hooknum == NF_IP_LOCAL_IN) |
146 | /* LOCAL_IN hook doesn't have a chain! */ | 146 | /* LOCAL_IN hook doesn't have a chain! */ |
147 | ret = alloc_null_binding(ct, info, hooknum); | 147 | ret = alloc_null_binding(ct, info, hooknum); |
@@ -171,10 +171,10 @@ nf_nat_fn(unsigned int hooknum, | |||
171 | 171 | ||
172 | static unsigned int | 172 | static unsigned int |
173 | nf_nat_in(unsigned int hooknum, | 173 | nf_nat_in(unsigned int hooknum, |
174 | struct sk_buff **pskb, | 174 | struct sk_buff **pskb, |
175 | const struct net_device *in, | 175 | const struct net_device *in, |
176 | const struct net_device *out, | 176 | const struct net_device *out, |
177 | int (*okfn)(struct sk_buff *)) | 177 | int (*okfn)(struct sk_buff *)) |
178 | { | 178 | { |
179 | unsigned int ret; | 179 | unsigned int ret; |
180 | __be32 daddr = (*pskb)->nh.iph->daddr; | 180 | __be32 daddr = (*pskb)->nh.iph->daddr; |
@@ -269,9 +269,9 @@ nf_nat_adjust(unsigned int hooknum, | |||
269 | 269 | ||
270 | ct = nf_ct_get(*pskb, &ctinfo); | 270 | ct = nf_ct_get(*pskb, &ctinfo); |
271 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { | 271 | if (ct && test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { |
272 | DEBUGP("nf_nat_standalone: adjusting sequence number\n"); | 272 | DEBUGP("nf_nat_standalone: adjusting sequence number\n"); |
273 | if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) | 273 | if (!nf_nat_seq_adjust(pskb, ct, ctinfo)) |
274 | return NF_DROP; | 274 | return NF_DROP; |
275 | } | 275 | } |
276 | return NF_ACCEPT; | 276 | return NF_ACCEPT; |
277 | } | 277 | } |
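Editor's note: nf_nat_in(), nf_nat_adjust() and the other functions in this file are netfilter hook entry points, attached to the IPv4 hook points through an array of nf_hook_ops registered in one call — the same pattern the conntrack init above unwinds with nf_unregister_hooks(). A hedged sketch of the registration shape for this kernel generation (kernel context assumed; the table contents and init/exit wrappers are illustrative, not the file's actual ops array):

    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>

    /* Sketch: one nf_hook_ops entry per hook point; values are illustrative. */
    static struct nf_hook_ops nat_ops[] = {
        {
            .hook     = nf_nat_in,
            .owner    = THIS_MODULE,
            .pf       = PF_INET,
            .hooknum  = NF_IP_PRE_ROUTING,
            .priority = NF_IP_PRI_NAT_DST,
        },
        /* ...further entries for POST_ROUTING, LOCAL_OUT, LOCAL_IN... */
    };

    static int nat_standalone_init(void)
    {
        return nf_register_hooks(nat_ops, ARRAY_SIZE(nat_ops));
    }

    static void nat_standalone_exit(void)
    {
        nf_unregister_hooks(nat_ops, ARRAY_SIZE(nat_ops));
    }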
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index cd873da54cbe..ccb199e9dd8b 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -266,7 +266,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
266 | 266 | ||
267 | for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) | 267 | for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) |
268 | seq_printf(seq, " %lu", | 268 | seq_printf(seq, " %lu", |
269 | fold_field((void **) ip_statistics, | 269 | fold_field((void **) ip_statistics, |
270 | snmp4_ipstats_list[i].entry)); | 270 | snmp4_ipstats_list[i].entry)); |
271 | 271 | ||
272 | seq_puts(seq, "\nIcmp:"); | 272 | seq_puts(seq, "\nIcmp:"); |
@@ -276,7 +276,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
276 | seq_puts(seq, "\nIcmp:"); | 276 | seq_puts(seq, "\nIcmp:"); |
277 | for (i = 0; snmp4_icmp_list[i].name != NULL; i++) | 277 | for (i = 0; snmp4_icmp_list[i].name != NULL; i++) |
278 | seq_printf(seq, " %lu", | 278 | seq_printf(seq, " %lu", |
279 | fold_field((void **) icmp_statistics, | 279 | fold_field((void **) icmp_statistics, |
280 | snmp4_icmp_list[i].entry)); | 280 | snmp4_icmp_list[i].entry)); |
281 | 281 | ||
282 | seq_puts(seq, "\nTcp:"); | 282 | seq_puts(seq, "\nTcp:"); |
@@ -288,7 +288,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
288 | /* MaxConn field is signed, RFC 2012 */ | 288 | /* MaxConn field is signed, RFC 2012 */ |
289 | if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) | 289 | if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) |
290 | seq_printf(seq, " %ld", | 290 | seq_printf(seq, " %ld", |
291 | fold_field((void **) tcp_statistics, | 291 | fold_field((void **) tcp_statistics, |
292 | snmp4_tcp_list[i].entry)); | 292 | snmp4_tcp_list[i].entry)); |
293 | else | 293 | else |
294 | seq_printf(seq, " %lu", | 294 | seq_printf(seq, " %lu", |
@@ -303,7 +303,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v) | |||
303 | seq_puts(seq, "\nUdp:"); | 303 | seq_puts(seq, "\nUdp:"); |
304 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) | 304 | for (i = 0; snmp4_udp_list[i].name != NULL; i++) |
305 | seq_printf(seq, " %lu", | 305 | seq_printf(seq, " %lu", |
306 | fold_field((void **) udp_statistics, | 306 | fold_field((void **) udp_statistics, |
307 | snmp4_udp_list[i].entry)); | 307 | snmp4_udp_list[i].entry)); |
308 | 308 | ||
309 | /* the UDP and UDP-Lite MIBs are the same */ | 309 | /* the UDP and UDP-Lite MIBs are the same */ |
@@ -348,7 +348,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v) | |||
348 | seq_puts(seq, "\nTcpExt:"); | 348 | seq_puts(seq, "\nTcpExt:"); |
349 | for (i = 0; snmp4_net_list[i].name != NULL; i++) | 349 | for (i = 0; snmp4_net_list[i].name != NULL; i++) |
350 | seq_printf(seq, " %lu", | 350 | seq_printf(seq, " %lu", |
351 | fold_field((void **) net_statistics, | 351 | fold_field((void **) net_statistics, |
352 | snmp4_net_list[i].entry)); | 352 | snmp4_net_list[i].entry)); |
353 | 353 | ||
354 | seq_putc(seq, '\n'); | 354 | seq_putc(seq, '\n'); |
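Editor's note: each seq_printf() loop above prints the result of fold_field(), which sums one MIB counter across every CPU's per-CPU statistics block. A simplified userspace sketch of that folding step (a plain 2-D array stands in for the per-CPU MIB pages):

    #include <stdio.h>

    #define NR_CPUS     4
    #define NR_COUNTERS 3

    /* Sketch: per-CPU counter pages, one row per CPU. */
    static unsigned long mib[NR_CPUS][NR_COUNTERS] = {
        { 1, 10, 100 },
        { 2, 20, 200 },
        { 3, 30, 300 },
        { 4, 40, 400 },
    };

    /* Fold one counter across CPUs, as fold_field() walks each CPU's block. */
    static unsigned long fold_field(unsigned int entry)
    {
        unsigned long sum = 0;
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            sum += mib[cpu][entry];
        return sum;
    }

    int main(void)
    {
        printf("counter 1 total: %lu\n", fold_field(1));   /* 100 */
        return 0;
    }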
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 05f5114828ea..6cd6340de8bd 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -74,7 +74,7 @@ int inet_add_protocol(struct net_protocol *prot, unsigned char protocol) | |||
74 | /* | 74 | /* |
75 | * Remove a protocol from the hash tables. | 75 | * Remove a protocol from the hash tables. |
76 | */ | 76 | */ |
77 | 77 | ||
78 | int inet_del_protocol(struct net_protocol *prot, unsigned char protocol) | 78 | int inet_del_protocol(struct net_protocol *prot, unsigned char protocol) |
79 | { | 79 | { |
80 | int hash, ret; | 80 | int hash, ret; |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index fed6a1e7af9e..931084bfb572 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -95,7 +95,7 @@ static void raw_v4_hash(struct sock *sk) | |||
95 | 95 | ||
96 | static void raw_v4_unhash(struct sock *sk) | 96 | static void raw_v4_unhash(struct sock *sk) |
97 | { | 97 | { |
98 | write_lock_bh(&raw_v4_lock); | 98 | write_lock_bh(&raw_v4_lock); |
99 | if (sk_del_node_init(sk)) | 99 | if (sk_del_node_init(sk)) |
100 | sock_prot_dec_use(sk->sk_prot); | 100 | sock_prot_dec_use(sk->sk_prot); |
101 | write_unlock_bh(&raw_v4_lock); | 101 | write_unlock_bh(&raw_v4_lock); |
@@ -238,7 +238,7 @@ void raw_err (struct sock *sk, struct sk_buff *skb, u32 info) | |||
238 | static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) | 238 | static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) |
239 | { | 239 | { |
240 | /* Charge it to the socket. */ | 240 | /* Charge it to the socket. */ |
241 | 241 | ||
242 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 242 | if (sock_queue_rcv_skb(sk, skb) < 0) { |
243 | /* FIXME: increment a raw drops counter here */ | 243 | /* FIXME: increment a raw drops counter here */ |
244 | kfree_skb(skb); | 244 | kfree_skb(skb); |
@@ -263,7 +263,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) | |||
263 | } | 263 | } |
264 | 264 | ||
265 | static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | 265 | static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, |
266 | struct rtable *rt, | 266 | struct rtable *rt, |
267 | unsigned int flags) | 267 | unsigned int flags) |
268 | { | 268 | { |
269 | struct inet_sock *inet = inet_sk(sk); | 269 | struct inet_sock *inet = inet_sk(sk); |
@@ -285,7 +285,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
285 | skb = sock_alloc_send_skb(sk, length+hh_len+15, | 285 | skb = sock_alloc_send_skb(sk, length+hh_len+15, |
286 | flags&MSG_DONTWAIT, &err); | 286 | flags&MSG_DONTWAIT, &err); |
287 | if (skb == NULL) | 287 | if (skb == NULL) |
288 | goto error; | 288 | goto error; |
289 | skb_reserve(skb, hh_len); | 289 | skb_reserve(skb, hh_len); |
290 | 290 | ||
291 | skb->priority = sk->sk_priority; | 291 | skb->priority = sk->sk_priority; |
@@ -326,7 +326,7 @@ error_fault: | |||
326 | kfree_skb(skb); | 326 | kfree_skb(skb); |
327 | error: | 327 | error: |
328 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); | 328 | IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS); |
329 | return err; | 329 | return err; |
330 | } | 330 | } |
331 | 331 | ||
332 | static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg) | 332 | static int raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg) |
@@ -399,9 +399,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
399 | err = -EOPNOTSUPP; | 399 | err = -EOPNOTSUPP; |
400 | if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ | 400 | if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ |
401 | goto out; /* compatibility */ | 401 | goto out; /* compatibility */ |
402 | 402 | ||
403 | /* | 403 | /* |
404 | * Get and verify the address. | 404 | * Get and verify the address. |
405 | */ | 405 | */ |
406 | 406 | ||
407 | if (msg->msg_namelen) { | 407 | if (msg->msg_namelen) { |
@@ -426,7 +426,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
426 | */ | 426 | */ |
427 | } else { | 427 | } else { |
428 | err = -EDESTADDRREQ; | 428 | err = -EDESTADDRREQ; |
429 | if (sk->sk_state != TCP_ESTABLISHED) | 429 | if (sk->sk_state != TCP_ESTABLISHED) |
430 | goto out; | 430 | goto out; |
431 | daddr = inet->daddr; | 431 | daddr = inet->daddr; |
432 | } | 432 | } |
@@ -480,7 +480,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
480 | .saddr = saddr, | 480 | .saddr = saddr, |
481 | .tos = tos } }, | 481 | .tos = tos } }, |
482 | .proto = inet->hdrincl ? IPPROTO_RAW : | 482 | .proto = inet->hdrincl ? IPPROTO_RAW : |
483 | sk->sk_protocol, | 483 | sk->sk_protocol, |
484 | }; | 484 | }; |
485 | if (!inet->hdrincl) { | 485 | if (!inet->hdrincl) { |
486 | err = raw_probe_proto_opt(&fl, msg); | 486 | err = raw_probe_proto_opt(&fl, msg); |
@@ -503,9 +503,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
503 | back_from_confirm: | 503 | back_from_confirm: |
504 | 504 | ||
505 | if (inet->hdrincl) | 505 | if (inet->hdrincl) |
506 | err = raw_send_hdrinc(sk, msg->msg_iov, len, | 506 | err = raw_send_hdrinc(sk, msg->msg_iov, len, |
507 | rt, msg->msg_flags); | 507 | rt, msg->msg_flags); |
508 | 508 | ||
509 | else { | 509 | else { |
510 | if (!ipc.addr) | 510 | if (!ipc.addr) |
511 | ipc.addr = rt->rt_dst; | 511 | ipc.addr = rt->rt_dst; |
@@ -538,7 +538,7 @@ do_confirm: | |||
538 | 538 | ||
539 | static void raw_close(struct sock *sk, long timeout) | 539 | static void raw_close(struct sock *sk, long timeout) |
540 | { | 540 | { |
541 | /* | 541 | /* |
542 | * Raw sockets may have direct kernel references. Kill them. | 542 | * Raw sockets may have direct kernel references. Kill them. |
543 | */ | 543 | */ |
544 | ip_ra_control(sk, 0, NULL); | 544 | ip_ra_control(sk, 0, NULL); |
@@ -861,7 +861,7 @@ static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i) | |||
861 | 861 | ||
862 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" | 862 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" |
863 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", | 863 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", |
864 | i, src, srcp, dest, destp, sp->sk_state, | 864 | i, src, srcp, dest, destp, sp->sk_state, |
865 | atomic_read(&sp->sk_wmem_alloc), | 865 | atomic_read(&sp->sk_wmem_alloc), |
866 | atomic_read(&sp->sk_rmem_alloc), | 866 | atomic_read(&sp->sk_rmem_alloc), |
867 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | 867 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index baee304a3cb7..56d6602affb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -20,7 +20,7 @@ | |||
20 | * (rco@di.uminho.pt) Routing table insertion and update | 20 | * (rco@di.uminho.pt) Routing table insertion and update |
21 | * Linus Torvalds : Rewrote bits to be sensible | 21 | * Linus Torvalds : Rewrote bits to be sensible |
22 | * Alan Cox : Added BSD route gw semantics | 22 | * Alan Cox : Added BSD route gw semantics |
23 | * Alan Cox : Super /proc >4K | 23 | * Alan Cox : Super /proc >4K |
24 | * Alan Cox : MTU in route table | 24 | * Alan Cox : MTU in route table |
25 | * Alan Cox : MSS actually. Also added the window | 25 | * Alan Cox : MSS actually. Also added the window |
26 | * clamper. | 26 | * clamper. |
@@ -38,7 +38,7 @@ | |||
38 | * Alan Cox : Faster /proc handling | 38 | * Alan Cox : Faster /proc handling |
39 | * Alexey Kuznetsov : Massive rework to support tree based routing, | 39 | * Alexey Kuznetsov : Massive rework to support tree based routing, |
40 | * routing caches and better behaviour. | 40 | * routing caches and better behaviour. |
41 | * | 41 | * |
42 | * Olaf Erb : irtt wasn't being copied right. | 42 | * Olaf Erb : irtt wasn't being copied right. |
43 | * Bjorn Ekwall : Kerneld route support. | 43 | * Bjorn Ekwall : Kerneld route support. |
44 | * Alan Cox : Multicast fixed (I hope) | 44 | * Alan Cox : Multicast fixed (I hope) |
@@ -361,8 +361,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) | |||
361 | dev_queue_xmit) : 0, | 361 | dev_queue_xmit) : 0, |
362 | r->rt_spec_dst); | 362 | r->rt_spec_dst); |
363 | seq_printf(seq, "%-127s\n", temp); | 363 | seq_printf(seq, "%-127s\n", temp); |
364 | } | 364 | } |
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | 367 | ||
368 | static struct seq_operations rt_cache_seq_ops = { | 368 | static struct seq_operations rt_cache_seq_ops = { |
@@ -429,7 +429,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
429 | return &per_cpu(rt_cache_stat, cpu); | 429 | return &per_cpu(rt_cache_stat, cpu); |
430 | } | 430 | } |
431 | return NULL; | 431 | return NULL; |
432 | 432 | ||
433 | } | 433 | } |
434 | 434 | ||
435 | static void rt_cpu_seq_stop(struct seq_file *seq, void *v) | 435 | static void rt_cpu_seq_stop(struct seq_file *seq, void *v) |
@@ -445,7 +445,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v) | |||
445 | seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); | 445 | seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); |
446 | return 0; | 446 | return 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " | 449 | seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " |
450 | " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", | 450 | " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", |
451 | atomic_read(&ipv4_dst_ops.entries), | 451 | atomic_read(&ipv4_dst_ops.entries), |
@@ -459,7 +459,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v) | |||
459 | 459 | ||
460 | st->out_hit, | 460 | st->out_hit, |
461 | st->out_slow_tot, | 461 | st->out_slow_tot, |
462 | st->out_slow_mc, | 462 | st->out_slow_mc, |
463 | 463 | ||
464 | st->gc_total, | 464 | st->gc_total, |
465 | st->gc_ignored, | 465 | st->gc_ignored, |
@@ -493,7 +493,7 @@ static struct file_operations rt_cpu_seq_fops = { | |||
493 | }; | 493 | }; |
494 | 494 | ||
495 | #endif /* CONFIG_PROC_FS */ | 495 | #endif /* CONFIG_PROC_FS */ |
496 | 496 | ||
497 | static __inline__ void rt_free(struct rtable *rt) | 497 | static __inline__ void rt_free(struct rtable *rt) |
498 | { | 498 | { |
499 | multipath_remove(rt); | 499 | multipath_remove(rt); |
@@ -672,8 +672,8 @@ static void rt_check_expire(unsigned long dummy) | |||
672 | rt_free(rth); | 672 | rt_free(rth); |
673 | } | 673 | } |
674 | #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | 674 | #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ |
675 | *rthp = rth->u.rt_next; | 675 | *rthp = rth->u.rt_next; |
676 | rt_free(rth); | 676 | rt_free(rth); |
677 | #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ | 677 | #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */ |
678 | } | 678 | } |
679 | spin_unlock(rt_hash_lock_addr(i)); | 679 | spin_unlock(rt_hash_lock_addr(i)); |
@@ -739,7 +739,7 @@ void rt_cache_flush(int delay) | |||
739 | 739 | ||
740 | if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay) | 740 | if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay) |
741 | tmo = 0; | 741 | tmo = 0; |
742 | 742 | ||
743 | if (delay > tmo) | 743 | if (delay > tmo) |
744 | delay = tmo; | 744 | delay = tmo; |
745 | } | 745 | } |
@@ -1104,7 +1104,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) | |||
1104 | return; | 1104 | return; |
1105 | } | 1105 | } |
1106 | } else | 1106 | } else |
1107 | printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", | 1107 | printk(KERN_DEBUG "rt_bind_peer(0) @%p\n", |
1108 | __builtin_return_address(0)); | 1108 | __builtin_return_address(0)); |
1109 | 1109 | ||
1110 | ip_select_fb_ident(iph); | 1110 | ip_select_fb_ident(iph); |
@@ -1190,7 +1190,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1190 | 1190 | ||
1191 | /* Copy all the information. */ | 1191 | /* Copy all the information. */ |
1192 | *rt = *rth; | 1192 | *rt = *rth; |
1193 | INIT_RCU_HEAD(&rt->u.dst.rcu_head); | 1193 | INIT_RCU_HEAD(&rt->u.dst.rcu_head); |
1194 | rt->u.dst.__use = 1; | 1194 | rt->u.dst.__use = 1; |
1195 | atomic_set(&rt->u.dst.__refcnt, 1); | 1195 | atomic_set(&rt->u.dst.__refcnt, 1); |
1196 | rt->u.dst.child = NULL; | 1196 | rt->u.dst.child = NULL; |
@@ -1225,11 +1225,11 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1225 | rt_drop(rt); | 1225 | rt_drop(rt); |
1226 | goto do_next; | 1226 | goto do_next; |
1227 | } | 1227 | } |
1228 | 1228 | ||
1229 | netevent.old = &rth->u.dst; | 1229 | netevent.old = &rth->u.dst; |
1230 | netevent.new = &rt->u.dst; | 1230 | netevent.new = &rt->u.dst; |
1231 | call_netevent_notifiers(NETEVENT_REDIRECT, | 1231 | call_netevent_notifiers(NETEVENT_REDIRECT, |
1232 | &netevent); | 1232 | &netevent); |
1233 | 1233 | ||
1234 | rt_del(hash, rth); | 1234 | rt_del(hash, rth); |
1235 | if (!rt_intern_hash(hash, rt, &rt)) | 1235 | if (!rt_intern_hash(hash, rt, &rt)) |
@@ -1343,7 +1343,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1343 | #endif | 1343 | #endif |
1344 | } | 1344 | } |
1345 | out: | 1345 | out: |
1346 | in_dev_put(in_dev); | 1346 | in_dev_put(in_dev); |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | static int ip_error(struct sk_buff *skb) | 1349 | static int ip_error(struct sk_buff *skb) |
@@ -1379,7 +1379,7 @@ static int ip_error(struct sk_buff *skb) | |||
1379 | 1379 | ||
1380 | out: kfree_skb(skb); | 1380 | out: kfree_skb(skb); |
1381 | return 0; | 1381 | return 0; |
1382 | } | 1382 | } |
1383 | 1383 | ||
1384 | /* | 1384 | /* |
1385 | * The last two values are not from the RFC but | 1385 | * The last two values are not from the RFC but |
@@ -1392,7 +1392,7 @@ static const unsigned short mtu_plateau[] = | |||
1392 | static __inline__ unsigned short guess_mtu(unsigned short old_mtu) | 1392 | static __inline__ unsigned short guess_mtu(unsigned short old_mtu) |
1393 | { | 1393 | { |
1394 | int i; | 1394 | int i; |
1395 | 1395 | ||
1396 | for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++) | 1396 | for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++) |
1397 | if (old_mtu > mtu_plateau[i]) | 1397 | if (old_mtu > mtu_plateau[i]) |
1398 | return mtu_plateau[i]; | 1398 | return mtu_plateau[i]; |
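Editor's note: guess_mtu() implements the RFC 1191 fallback — when a Fragmentation Needed message carries no next-hop MTU, drop to the largest entry in a table of common MTU plateaus that is still below the MTU currently in use. A standalone sketch with illustrative plateau values (the kernel's actual mtu_plateau[] differs slightly, as the comment above notes):

    #include <stdio.h>

    /* Illustrative plateau values, descending (see RFC 1191 §7.1);
     * not a copy of the kernel's table. */
    static const unsigned short plateau[] =
        { 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 68 };

    static unsigned short guess_mtu(unsigned short old_mtu)
    {
        unsigned int i;

        for (i = 0; i < sizeof(plateau) / sizeof(plateau[0]); i++)
            if (old_mtu > plateau[i])
                return plateau[i];      /* first plateau below old_mtu */
        return 68;                      /* minimum IPv4 MTU */
    }

    int main(void)
    {
        printf("%u\n", guess_mtu(1500));        /* prints 1492 */
        return 0;
    }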
@@ -1436,7 +1436,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu) | |||
1436 | mtu = guess_mtu(old_mtu); | 1436 | mtu = guess_mtu(old_mtu); |
1437 | } | 1437 | } |
1438 | if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) { | 1438 | if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) { |
1439 | if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { | 1439 | if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) { |
1440 | dst_confirm(&rth->u.dst); | 1440 | dst_confirm(&rth->u.dst); |
1441 | if (mtu < ip_rt_min_pmtu) { | 1441 | if (mtu < ip_rt_min_pmtu) { |
1442 | mtu = ip_rt_min_pmtu; | 1442 | mtu = ip_rt_min_pmtu; |
@@ -1600,7 +1600,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) | |||
1600 | #endif | 1600 | #endif |
1601 | set_class_tag(rt, itag); | 1601 | set_class_tag(rt, itag); |
1602 | #endif | 1602 | #endif |
1603 | rt->rt_type = res->type; | 1603 | rt->rt_type = res->type; |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 1606 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
@@ -1714,11 +1714,11 @@ static void ip_handle_martian_source(struct net_device *dev, | |||
1714 | #endif | 1714 | #endif |
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | static inline int __mkroute_input(struct sk_buff *skb, | 1717 | static inline int __mkroute_input(struct sk_buff *skb, |
1718 | struct fib_result* res, | 1718 | struct fib_result* res, |
1719 | struct in_device *in_dev, | 1719 | struct in_device *in_dev, |
1720 | __be32 daddr, __be32 saddr, u32 tos, | 1720 | __be32 daddr, __be32 saddr, u32 tos, |
1721 | struct rtable **result) | 1721 | struct rtable **result) |
1722 | { | 1722 | { |
1723 | 1723 | ||
1724 | struct rtable *rth; | 1724 | struct rtable *rth; |
@@ -1738,12 +1738,12 @@ static inline int __mkroute_input(struct sk_buff *skb, | |||
1738 | } | 1738 | } |
1739 | 1739 | ||
1740 | 1740 | ||
1741 | err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), | 1741 | err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), |
1742 | in_dev->dev, &spec_dst, &itag); | 1742 | in_dev->dev, &spec_dst, &itag); |
1743 | if (err < 0) { | 1743 | if (err < 0) { |
1744 | ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, | 1744 | ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, |
1745 | saddr); | 1745 | saddr); |
1746 | 1746 | ||
1747 | err = -EINVAL; | 1747 | err = -EINVAL; |
1748 | goto cleanup; | 1748 | goto cleanup; |
1749 | } | 1749 | } |
@@ -1811,10 +1811,10 @@ static inline int __mkroute_input(struct sk_buff *skb, | |||
1811 | /* release the working reference to the output device */ | 1811 | /* release the working reference to the output device */ |
1812 | in_dev_put(out_dev); | 1812 | in_dev_put(out_dev); |
1813 | return err; | 1813 | return err; |
1814 | } | 1814 | } |
1815 | 1815 | ||
1816 | static inline int ip_mkroute_input_def(struct sk_buff *skb, | 1816 | static inline int ip_mkroute_input_def(struct sk_buff *skb, |
1817 | struct fib_result* res, | 1817 | struct fib_result* res, |
1818 | const struct flowi *fl, | 1818 | const struct flowi *fl, |
1819 | struct in_device *in_dev, | 1819 | struct in_device *in_dev, |
1820 | __be32 daddr, __be32 saddr, u32 tos) | 1820 | __be32 daddr, __be32 saddr, u32 tos) |
@@ -1835,11 +1835,11 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb, | |||
1835 | 1835 | ||
1836 | /* put it into the cache */ | 1836 | /* put it into the cache */ |
1837 | hash = rt_hash(daddr, saddr, fl->iif); | 1837 | hash = rt_hash(daddr, saddr, fl->iif); |
1838 | return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); | 1838 | return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); |
1839 | } | 1839 | } |
1840 | 1840 | ||
1841 | static inline int ip_mkroute_input(struct sk_buff *skb, | 1841 | static inline int ip_mkroute_input(struct sk_buff *skb, |
1842 | struct fib_result* res, | 1842 | struct fib_result* res, |
1843 | const struct flowi *fl, | 1843 | const struct flowi *fl, |
1844 | struct in_device *in_dev, | 1844 | struct in_device *in_dev, |
1845 | __be32 daddr, __be32 saddr, u32 tos) | 1845 | __be32 daddr, __be32 saddr, u32 tos) |
@@ -1859,7 +1859,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb, | |||
1859 | if (hopcount < 2) | 1859 | if (hopcount < 2) |
1860 | return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, | 1860 | return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, |
1861 | saddr, tos); | 1861 | saddr, tos); |
1862 | 1862 | ||
1863 | /* add all alternatives to the routing cache */ | 1863 | /* add all alternatives to the routing cache */ |
1864 | for (hop = 0; hop < hopcount; hop++) { | 1864 | for (hop = 0; hop < hopcount; hop++) { |
1865 | res->nh_sel = hop; | 1865 | res->nh_sel = hop; |
@@ -1988,7 +1988,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1988 | goto e_nobufs; | 1988 | goto e_nobufs; |
1989 | if (err == -EINVAL) | 1989 | if (err == -EINVAL) |
1990 | goto e_inval; | 1990 | goto e_inval; |
1991 | 1991 | ||
1992 | done: | 1992 | done: |
1993 | in_dev_put(in_dev); | 1993 | in_dev_put(in_dev); |
1994 | if (free_res) | 1994 | if (free_res) |
@@ -2071,8 +2071,8 @@ martian_destination: | |||
2071 | #endif | 2071 | #endif |
2072 | 2072 | ||
2073 | e_hostunreach: | 2073 | e_hostunreach: |
2074 | err = -EHOSTUNREACH; | 2074 | err = -EHOSTUNREACH; |
2075 | goto done; | 2075 | goto done; |
2076 | 2076 | ||
2077 | e_inval: | 2077 | e_inval: |
2078 | err = -EINVAL; | 2078 | err = -EINVAL; |
@@ -2153,11 +2153,11 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2153 | } | 2153 | } |
2154 | 2154 | ||
2155 | static inline int __mkroute_output(struct rtable **result, | 2155 | static inline int __mkroute_output(struct rtable **result, |
2156 | struct fib_result* res, | 2156 | struct fib_result* res, |
2157 | const struct flowi *fl, | 2157 | const struct flowi *fl, |
2158 | const struct flowi *oldflp, | 2158 | const struct flowi *oldflp, |
2159 | struct net_device *dev_out, | 2159 | struct net_device *dev_out, |
2160 | unsigned flags) | 2160 | unsigned flags) |
2161 | { | 2161 | { |
2162 | struct rtable *rth; | 2162 | struct rtable *rth; |
2163 | struct in_device *in_dev; | 2163 | struct in_device *in_dev; |
@@ -2190,7 +2190,7 @@ static inline int __mkroute_output(struct rtable **result, | |||
2190 | } | 2190 | } |
2191 | } else if (res->type == RTN_MULTICAST) { | 2191 | } else if (res->type == RTN_MULTICAST) { |
2192 | flags |= RTCF_MULTICAST|RTCF_LOCAL; | 2192 | flags |= RTCF_MULTICAST|RTCF_LOCAL; |
2193 | if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src, | 2193 | if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src, |
2194 | oldflp->proto)) | 2194 | oldflp->proto)) |
2195 | flags &= ~RTCF_LOCAL; | 2195 | flags &= ~RTCF_LOCAL; |
2196 | /* If multicast route does not exist use | 2196 | /* If multicast route does not exist use |
@@ -2208,7 +2208,7 @@ static inline int __mkroute_output(struct rtable **result, | |||
2208 | if (!rth) { | 2208 | if (!rth) { |
2209 | err = -ENOBUFS; | 2209 | err = -ENOBUFS; |
2210 | goto cleanup; | 2210 | goto cleanup; |
2211 | } | 2211 | } |
2212 | 2212 | ||
2213 | atomic_set(&rth->u.dst.__refcnt, 1); | 2213 | atomic_set(&rth->u.dst.__refcnt, 1); |
2214 | rth->u.dst.flags= DST_HOST; | 2214 | rth->u.dst.flags= DST_HOST; |
@@ -2232,7 +2232,7 @@ static inline int __mkroute_output(struct rtable **result, | |||
2232 | rth->rt_dst = fl->fl4_dst; | 2232 | rth->rt_dst = fl->fl4_dst; |
2233 | rth->rt_src = fl->fl4_src; | 2233 | rth->rt_src = fl->fl4_src; |
2234 | rth->rt_iif = oldflp->oif ? : dev_out->ifindex; | 2234 | rth->rt_iif = oldflp->oif ? : dev_out->ifindex; |
2235 | /* get references to the devices that are to be held by the routing | 2235 | /* get references to the devices that are to be held by the routing |
2236 | cache entry */ | 2236 | cache entry */ |
2237 | rth->u.dst.dev = dev_out; | 2237 | rth->u.dst.dev = dev_out; |
2238 | dev_hold(dev_out); | 2238 | dev_hold(dev_out); |
@@ -2250,7 +2250,7 @@ static inline int __mkroute_output(struct rtable **result, | |||
2250 | } | 2250 | } |
2251 | if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { | 2251 | if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { |
2252 | rth->rt_spec_dst = fl->fl4_src; | 2252 | rth->rt_spec_dst = fl->fl4_src; |
2253 | if (flags & RTCF_LOCAL && | 2253 | if (flags & RTCF_LOCAL && |
2254 | !(dev_out->flags & IFF_LOOPBACK)) { | 2254 | !(dev_out->flags & IFF_LOOPBACK)) { |
2255 | rth->u.dst.output = ip_mc_output; | 2255 | rth->u.dst.output = ip_mc_output; |
2256 | RT_CACHE_STAT_INC(out_slow_mc); | 2256 | RT_CACHE_STAT_INC(out_slow_mc); |
@@ -2292,7 +2292,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp, | |||
2292 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif); | 2292 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif); |
2293 | err = rt_intern_hash(hash, rth, rp); | 2293 | err = rt_intern_hash(hash, rth, rp); |
2294 | } | 2294 | } |
2295 | 2295 | ||
2296 | return err; | 2296 | return err; |
2297 | } | 2297 | } |
2298 | 2298 | ||
@@ -2830,7 +2830,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2830 | continue; | 2830 | continue; |
2831 | skb->dst = dst_clone(&rt->u.dst); | 2831 | skb->dst = dst_clone(&rt->u.dst); |
2832 | if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, | 2832 | if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, |
2833 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 2833 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
2834 | 1, NLM_F_MULTI) <= 0) { | 2834 | 1, NLM_F_MULTI) <= 0) { |
2835 | dst_release(xchg(&skb->dst, NULL)); | 2835 | dst_release(xchg(&skb->dst, NULL)); |
2836 | rcu_read_unlock_bh(); | 2836 | rcu_read_unlock_bh(); |
@@ -2863,7 +2863,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write, | |||
2863 | proc_dointvec(ctl, write, filp, buffer, lenp, ppos); | 2863 | proc_dointvec(ctl, write, filp, buffer, lenp, ppos); |
2864 | rt_cache_flush(flush_delay); | 2864 | rt_cache_flush(flush_delay); |
2865 | return 0; | 2865 | return 0; |
2866 | } | 2866 | } |
2867 | 2867 | ||
2868 | return -EINVAL; | 2868 | return -EINVAL; |
2869 | } | 2869 | } |
@@ -2880,13 +2880,13 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table, | |||
2880 | if (newlen != sizeof(int)) | 2880 | if (newlen != sizeof(int)) |
2881 | return -EINVAL; | 2881 | return -EINVAL; |
2882 | if (get_user(delay, (int __user *)newval)) | 2882 | if (get_user(delay, (int __user *)newval)) |
2883 | return -EFAULT; | 2883 | return -EFAULT; |
2884 | rt_cache_flush(delay); | 2884 | rt_cache_flush(delay); |
2885 | return 0; | 2885 | return 0; |
2886 | } | 2886 | } |
2887 | 2887 | ||
2888 | ctl_table ipv4_route_table[] = { | 2888 | ctl_table ipv4_route_table[] = { |
2889 | { | 2889 | { |
2890 | .ctl_name = NET_IPV4_ROUTE_FLUSH, | 2890 | .ctl_name = NET_IPV4_ROUTE_FLUSH, |
2891 | .procname = "flush", | 2891 | .procname = "flush", |
2892 | .data = &flush_delay, | 2892 | .data = &flush_delay, |
@@ -2931,7 +2931,7 @@ ctl_table ipv4_route_table[] = { | |||
2931 | }, | 2931 | }, |
2932 | { | 2932 | { |
2933 | /* Deprecated. Use gc_min_interval_ms */ | 2933 | /* Deprecated. Use gc_min_interval_ms */ |
2934 | 2934 | ||
2935 | .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL, | 2935 | .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL, |
2936 | .procname = "gc_min_interval", | 2936 | .procname = "gc_min_interval", |
2937 | .data = &ip_rt_gc_min_interval, | 2937 | .data = &ip_rt_gc_min_interval, |
@@ -3180,8 +3180,8 @@ int __init ip_rt_init(void) | |||
3180 | { | 3180 | { |
3181 | struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */ | 3181 | struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */ |
3182 | if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || | 3182 | if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || |
3183 | !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, | 3183 | !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, |
3184 | proc_net_stat))) { | 3184 | proc_net_stat))) { |
3185 | return -ENOMEM; | 3185 | return -ENOMEM; |
3186 | } | 3186 | } |
3187 | rtstat_pde->proc_fops = &rt_cpu_seq_fops; | 3187 | rtstat_pde->proc_fops = &rt_cpu_seq_fops; |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 6b19530905af..33016cc90f0b 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -2,16 +2,16 @@ | |||
2 | * Syncookies implementation for the Linux kernel | 2 | * Syncookies implementation for the Linux kernel |
3 | * | 3 | * |
4 | * Copyright (C) 1997 Andi Kleen | 4 | * Copyright (C) 1997 Andi Kleen |
5 | * Based on ideas by D.J.Bernstein and Eric Schenk. | 5 | * Based on ideas by D.J.Bernstein and Eric Schenk. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
9 | * as published by the Free Software Foundation; either version | 9 | * as published by the Free Software Foundation; either version |
10 | * 2 of the License, or (at your option) any later version. | 10 | * 2 of the License, or (at your option) any later version. |
11 | * | 11 | * |
12 | * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ | 12 | * $Id: syncookies.c,v 1.18 2002/02/01 22:01:04 davem Exp $ |
13 | * | 13 | * |
14 | * Missing: IPv6 support. | 14 | * Missing: IPv6 support. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/tcp.h> | 17 | #include <linux/tcp.h> |
@@ -57,7 +57,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport, | |||
57 | /* | 57 | /* |
58 | * Compute the secure sequence number. | 58 | * Compute the secure sequence number. |
59 | * The output should be: | 59 | * The output should be: |
60 | * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24) | 60 | * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24) |
61 | * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24). | 61 | * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24). |
62 | * Where sseq is their sequence number and count increases every | 62 | * Where sseq is their sequence number and count increases every |
63 | * minute by 1. | 63 | * minute by 1. |
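The formula above packs three things into the 32-bit ISN: a keyed hash binding the cookie to the 4-tuple, the peer's sequence number, the minute counter in the top 8 bits, and 24 blinded bits of data (the MSS table index). The compilable sketch below follows that layout; hash32() is a toy stand-in for the two secret-keyed hashes, not the kernel's function:

#include <stdint.h>
#include <stdio.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)

/* toy stand-in for the two secret-keyed hashes (an assumption, not the
 * kernel's implementation) */
static uint32_t hash32(uint32_t secret, uint32_t saddr, uint32_t daddr,
		       uint16_t sport, uint16_t dport, uint32_t count)
{
	uint32_t h = 2654435761u * (saddr ^ daddr ^
				    ((uint32_t)sport << 16 | dport));
	return h ^ (count * 40503u) ^ secret;
}

/* top 8 bits carry the minute counter, low 24 bits carry the data field,
 * both blinded by the hashes exactly as the comment describes */
static uint32_t make_cookie(uint32_t saddr, uint32_t daddr, uint16_t sport,
			    uint16_t dport, uint32_t sseq, uint32_t count,
			    uint32_t data)
{
	return hash32(1, saddr, daddr, sport, dport, 0) + sseq +
	       (count << COOKIEBITS) +
	       ((hash32(2, saddr, daddr, sport, dport, count) + data)
		& COOKIEMASK);
}

int main(void)
{
	printf("cookie: %08x\n",
	       make_cookie(0x0a000001, 0x0a000002, 12345, 80,
			   1000, 7 /* minute counter */, 6 /* mssind */));
	return 0;
}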
@@ -99,17 +99,17 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, | |||
99 | & COOKIEMASK; /* Leaving the data behind */ | 99 | & COOKIEMASK; /* Leaving the data behind */ |
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * This table has to be sorted and terminated with (__u16)-1. | 103 | * This table has to be sorted and terminated with (__u16)-1. |
104 | * XXX generate a better table. | 104 | * XXX generate a better table. |
105 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. | 105 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. |
106 | */ | 106 | */ |
107 | static __u16 const msstab[] = { | 107 | static __u16 const msstab[] = { |
108 | 64 - 1, | 108 | 64 - 1, |
109 | 256 - 1, | 109 | 256 - 1, |
110 | 512 - 1, | 110 | 512 - 1, |
111 | 536 - 1, | 111 | 536 - 1, |
112 | 1024 - 1, | 112 | 1024 - 1, |
113 | 1440 - 1, | 113 | 1440 - 1, |
114 | 1460 - 1, | 114 | 1460 - 1, |
115 | 4312 - 1, | 115 | 4312 - 1, |
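Only 24 blinded bits are available for data, so the cookie stores an index into msstab[] rather than the MSS itself; on the return path the connection uses msstab[index] + 1. A small worked example with the table values shown above (the scan below is illustrative, not the kernel's exact loop):

#include <stdio.h>

static const unsigned short msstab[] = {
	64 - 1, 256 - 1, 512 - 1, 536 - 1,
	1024 - 1, 1440 - 1, 1460 - 1, 4312 - 1,
	(unsigned short)-1	/* terminator, as the comment requires */
};

int main(void)
{
	unsigned short mss = 1460;	/* MSS advertised in the SYN */
	int i = 0;

	/* pick the largest plateau still below the advertised MSS */
	while (msstab[i + 1] != (unsigned short)-1 && mss > msstab[i + 1])
		i++;
	printf("index %d -> clamped MSS %u\n", i, msstab[i] + 1);
	return 0;
}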
@@ -128,7 +128,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
128 | int mssind; | 128 | int mssind; |
129 | const __u16 mss = *mssp; | 129 | const __u16 mss = *mssp; |
130 | 130 | ||
131 | 131 | ||
132 | tp->last_synq_overflow = jiffies; | 132 | tp->last_synq_overflow = jiffies; |
133 | 133 | ||
134 | /* XXX sort msstab[] by probability? Binary search? */ | 134 | /* XXX sort msstab[] by probability? Binary search? */ |
@@ -144,23 +144,23 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
144 | jiffies / (HZ * 60), mssind); | 144 | jiffies / (HZ * 60), mssind); |
145 | } | 145 | } |
146 | 146 | ||
147 | /* | 147 | /* |
148 | * This (misnamed) value is the maximum age of a syncookie which is permitted. | 148 | * This (misnamed) value is the maximum age of a syncookie which is permitted. |

149 | * Its ideal value should be dependent on TCP_TIMEOUT_INIT and | 149 | * Its ideal value should be dependent on TCP_TIMEOUT_INIT and |
150 | * sysctl_tcp_retries1. It's a rather complicated formula (exponential | 150 | * sysctl_tcp_retries1. It's a rather complicated formula (exponential |
151 | * backoff) to compute at runtime so it's currently hardcoded here. | 151 | * backoff) to compute at runtime so it's currently hardcoded here. |
152 | */ | 152 | */ |
153 | #define COUNTER_TRIES 4 | 153 | #define COUNTER_TRIES 4 |
154 | /* | 154 | /* |
155 | * Check if a ack sequence number is a valid syncookie. | 155 | * Check if a ack sequence number is a valid syncookie. |
156 | * Return the decoded mss if it is, or 0 if not. | 156 | * Return the decoded mss if it is, or 0 if not. |
157 | */ | 157 | */ |
158 | static inline int cookie_check(struct sk_buff *skb, __u32 cookie) | 158 | static inline int cookie_check(struct sk_buff *skb, __u32 cookie) |
159 | { | 159 | { |
160 | __u32 seq; | 160 | __u32 seq; |
161 | __u32 mssind; | 161 | __u32 mssind; |
162 | 162 | ||
163 | seq = ntohl(skb->h.th->seq)-1; | 163 | seq = ntohl(skb->h.th->seq)-1; |
164 | mssind = check_tcp_syn_cookie(cookie, | 164 | mssind = check_tcp_syn_cookie(cookie, |
165 | skb->nh.iph->saddr, skb->nh.iph->daddr, | 165 | skb->nh.iph->saddr, skb->nh.iph->daddr, |
166 | skb->h.th->source, skb->h.th->dest, | 166 | skb->h.th->source, skb->h.th->dest, |
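Because the counter baked into the cookie's top byte advances once a minute, COUNTER_TRIES bounds how many minute ticks may pass before the cookie is rejected. A minimal sketch of that freshness test, with wrap-around handled by an 8-bit mask (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define COUNTER_TRIES 4

/* accept only while fewer than COUNTER_TRIES minutes have elapsed since
 * the counter value recovered from the cookie */
static int cookie_is_fresh(uint32_t cookie_count, uint32_t now_count)
{
	uint32_t diff = (now_count - cookie_count) & 0xff;	/* 8-bit wrap */

	return diff < COUNTER_TRIES;
}

int main(void)
{
	printf("%d %d\n", cookie_is_fresh(7, 9), cookie_is_fresh(7, 12));
	return 0;
}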
@@ -191,19 +191,19 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
191 | struct inet_request_sock *ireq; | 191 | struct inet_request_sock *ireq; |
192 | struct tcp_request_sock *treq; | 192 | struct tcp_request_sock *treq; |
193 | struct tcp_sock *tp = tcp_sk(sk); | 193 | struct tcp_sock *tp = tcp_sk(sk); |
194 | __u32 cookie = ntohl(skb->h.th->ack_seq) - 1; | 194 | __u32 cookie = ntohl(skb->h.th->ack_seq) - 1; |
195 | struct sock *ret = sk; | 195 | struct sock *ret = sk; |
196 | struct request_sock *req; | 196 | struct request_sock *req; |
197 | int mss; | 197 | int mss; |
198 | struct rtable *rt; | 198 | struct rtable *rt; |
199 | __u8 rcv_wscale; | 199 | __u8 rcv_wscale; |
200 | 200 | ||
201 | if (!sysctl_tcp_syncookies || !skb->h.th->ack) | 201 | if (!sysctl_tcp_syncookies || !skb->h.th->ack) |
202 | goto out; | 202 | goto out; |
203 | 203 | ||
204 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || | 204 | if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) || |
205 | (mss = cookie_check(skb, cookie)) == 0) { | 205 | (mss = cookie_check(skb, cookie)) == 0) { |
206 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); | 206 | NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESFAILED); |
207 | goto out; | 207 | goto out; |
208 | } | 208 | } |
209 | 209 | ||
@@ -221,9 +221,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
221 | ireq = inet_rsk(req); | 221 | ireq = inet_rsk(req); |
222 | treq = tcp_rsk(req); | 222 | treq = tcp_rsk(req); |
223 | treq->rcv_isn = ntohl(skb->h.th->seq) - 1; | 223 | treq->rcv_isn = ntohl(skb->h.th->seq) - 1; |
224 | treq->snt_isn = cookie; | 224 | treq->snt_isn = cookie; |
225 | req->mss = mss; | 225 | req->mss = mss; |
226 | ireq->rmt_port = skb->h.th->source; | 226 | ireq->rmt_port = skb->h.th->source; |
227 | ireq->loc_addr = skb->nh.iph->daddr; | 227 | ireq->loc_addr = skb->nh.iph->daddr; |
228 | ireq->rmt_addr = skb->nh.iph->saddr; | 228 | ireq->rmt_addr = skb->nh.iph->saddr; |
229 | ireq->opt = NULL; | 229 | ireq->opt = NULL; |
@@ -242,15 +242,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
242 | } | 242 | } |
243 | 243 | ||
244 | ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0; | 244 | ireq->snd_wscale = ireq->rcv_wscale = ireq->tstamp_ok = 0; |
245 | ireq->wscale_ok = ireq->sack_ok = 0; | 245 | ireq->wscale_ok = ireq->sack_ok = 0; |
246 | req->expires = 0UL; | 246 | req->expires = 0UL; |
247 | req->retrans = 0; | 247 | req->retrans = 0; |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * We need to lookup the route here to get at the correct | 250 | * We need to lookup the route here to get at the correct |
251 | * window size. We should better make sure that the window size | 251 | * window size. We should better make sure that the window size |
252 | * hasn't changed since we received the original syn, but I see | 252 | * hasn't changed since we received the original syn, but I see |
253 | * no easy way to do this. | 253 | * no easy way to do this. |
254 | */ | 254 | */ |
255 | { | 255 | { |
256 | struct flowi fl = { .nl_u = { .ip4_u = | 256 | struct flowi fl = { .nl_u = { .ip4_u = |
@@ -266,17 +266,17 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
266 | security_req_classify_flow(req, &fl); | 266 | security_req_classify_flow(req, &fl); |
267 | if (ip_route_output_key(&rt, &fl)) { | 267 | if (ip_route_output_key(&rt, &fl)) { |
268 | reqsk_free(req); | 268 | reqsk_free(req); |
269 | goto out; | 269 | goto out; |
270 | } | 270 | } |
271 | } | 271 | } |
272 | 272 | ||
273 | /* Try to redo what tcp_v4_send_synack did. */ | 273 | /* Try to redo what tcp_v4_send_synack did. */ |
274 | req->window_clamp = dst_metric(&rt->u.dst, RTAX_WINDOW); | 274 | req->window_clamp = dst_metric(&rt->u.dst, RTAX_WINDOW); |
275 | tcp_select_initial_window(tcp_full_space(sk), req->mss, | 275 | tcp_select_initial_window(tcp_full_space(sk), req->mss, |
276 | &req->rcv_wnd, &req->window_clamp, | 276 | &req->rcv_wnd, &req->window_clamp, |
277 | 0, &rcv_wscale); | 277 | 0, &rcv_wscale); |
278 | /* BTW win scale with syncookies is 0 by definition */ | 278 | /* BTW win scale with syncookies is 0 by definition */ |
279 | ireq->rcv_wscale = rcv_wscale; | 279 | ireq->rcv_wscale = rcv_wscale; |
280 | 280 | ||
281 | ret = get_cookie_sock(sk, skb, req, &rt->u.dst); | 281 | ret = get_cookie_sock(sk, skb, req, &rt->u.dst); |
282 | out: return ret; | 282 | out: return ret; |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index fabf69a9108c..0aa304711a96 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -24,7 +24,7 @@ extern int sysctl_ip_nonlocal_bind; | |||
24 | 24 | ||
25 | #ifdef CONFIG_SYSCTL | 25 | #ifdef CONFIG_SYSCTL |
26 | static int zero; | 26 | static int zero; |
27 | static int tcp_retr1_max = 255; | 27 | static int tcp_retr1_max = 255; |
28 | static int ip_local_port_range_min[] = { 1, 1 }; | 28 | static int ip_local_port_range_min[] = { 1, 1 }; |
29 | static int ip_local_port_range_max[] = { 65535, 65535 }; | 29 | static int ip_local_port_range_max[] = { 65535, 65535 }; |
30 | #endif | 30 | #endif |
@@ -187,7 +187,7 @@ static int strategy_allowed_congestion_control(ctl_table *table, int __user *nam | |||
187 | } | 187 | } |
188 | 188 | ||
189 | ctl_table ipv4_table[] = { | 189 | ctl_table ipv4_table[] = { |
190 | { | 190 | { |
191 | .ctl_name = NET_IPV4_TCP_TIMESTAMPS, | 191 | .ctl_name = NET_IPV4_TCP_TIMESTAMPS, |
192 | .procname = "tcp_timestamps", | 192 | .procname = "tcp_timestamps", |
193 | .data = &sysctl_tcp_timestamps, | 193 | .data = &sysctl_tcp_timestamps, |
@@ -195,7 +195,7 @@ ctl_table ipv4_table[] = { | |||
195 | .mode = 0644, | 195 | .mode = 0644, |
196 | .proc_handler = &proc_dointvec | 196 | .proc_handler = &proc_dointvec |
197 | }, | 197 | }, |
198 | { | 198 | { |
199 | .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, | 199 | .ctl_name = NET_IPV4_TCP_WINDOW_SCALING, |
200 | .procname = "tcp_window_scaling", | 200 | .procname = "tcp_window_scaling", |
201 | .data = &sysctl_tcp_window_scaling, | 201 | .data = &sysctl_tcp_window_scaling, |
@@ -203,7 +203,7 @@ ctl_table ipv4_table[] = { | |||
203 | .mode = 0644, | 203 | .mode = 0644, |
204 | .proc_handler = &proc_dointvec | 204 | .proc_handler = &proc_dointvec |
205 | }, | 205 | }, |
206 | { | 206 | { |
207 | .ctl_name = NET_IPV4_TCP_SACK, | 207 | .ctl_name = NET_IPV4_TCP_SACK, |
208 | .procname = "tcp_sack", | 208 | .procname = "tcp_sack", |
209 | .data = &sysctl_tcp_sack, | 209 | .data = &sysctl_tcp_sack, |
@@ -211,7 +211,7 @@ ctl_table ipv4_table[] = { | |||
211 | .mode = 0644, | 211 | .mode = 0644, |
212 | .proc_handler = &proc_dointvec | 212 | .proc_handler = &proc_dointvec |
213 | }, | 213 | }, |
214 | { | 214 | { |
215 | .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, | 215 | .ctl_name = NET_IPV4_TCP_RETRANS_COLLAPSE, |
216 | .procname = "tcp_retrans_collapse", | 216 | .procname = "tcp_retrans_collapse", |
217 | .data = &sysctl_tcp_retrans_collapse, | 217 | .data = &sysctl_tcp_retrans_collapse, |
@@ -219,7 +219,7 @@ ctl_table ipv4_table[] = { | |||
219 | .mode = 0644, | 219 | .mode = 0644, |
220 | .proc_handler = &proc_dointvec | 220 | .proc_handler = &proc_dointvec |
221 | }, | 221 | }, |
222 | { | 222 | { |
223 | .ctl_name = NET_IPV4_FORWARD, | 223 | .ctl_name = NET_IPV4_FORWARD, |
224 | .procname = "ip_forward", | 224 | .procname = "ip_forward", |
225 | .data = &ipv4_devconf.forwarding, | 225 | .data = &ipv4_devconf.forwarding, |
@@ -228,16 +228,16 @@ ctl_table ipv4_table[] = { | |||
228 | .proc_handler = &ipv4_sysctl_forward, | 228 | .proc_handler = &ipv4_sysctl_forward, |
229 | .strategy = &ipv4_sysctl_forward_strategy | 229 | .strategy = &ipv4_sysctl_forward_strategy |
230 | }, | 230 | }, |
231 | { | 231 | { |
232 | .ctl_name = NET_IPV4_DEFAULT_TTL, | 232 | .ctl_name = NET_IPV4_DEFAULT_TTL, |
233 | .procname = "ip_default_ttl", | 233 | .procname = "ip_default_ttl", |
234 | .data = &sysctl_ip_default_ttl, | 234 | .data = &sysctl_ip_default_ttl, |
235 | .maxlen = sizeof(int), | 235 | .maxlen = sizeof(int), |
236 | .mode = 0644, | 236 | .mode = 0644, |
237 | .proc_handler = &ipv4_doint_and_flush, | 237 | .proc_handler = &ipv4_doint_and_flush, |
238 | .strategy = &ipv4_doint_and_flush_strategy, | 238 | .strategy = &ipv4_doint_and_flush_strategy, |
239 | }, | 239 | }, |
240 | { | 240 | { |
241 | .ctl_name = NET_IPV4_NO_PMTU_DISC, | 241 | .ctl_name = NET_IPV4_NO_PMTU_DISC, |
242 | .procname = "ip_no_pmtu_disc", | 242 | .procname = "ip_no_pmtu_disc", |
243 | .data = &ipv4_config.no_pmtu_disc, | 243 | .data = &ipv4_config.no_pmtu_disc, |
@@ -728,7 +728,7 @@ ctl_table ipv4_table[] = { | |||
728 | .mode = 0644, | 728 | .mode = 0644, |
729 | .proc_handler = &proc_dointvec, | 729 | .proc_handler = &proc_dointvec, |
730 | }, | 730 | }, |
731 | { | 731 | { |
732 | .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, | 732 | .ctl_name = NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, |
733 | .procname = "tcp_workaround_signed_windows", | 733 | .procname = "tcp_workaround_signed_windows", |
734 | .data = &sysctl_tcp_workaround_signed_windows, | 734 | .data = &sysctl_tcp_workaround_signed_windows, |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5bd43d7294fd..ac6516c642a1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -475,7 +475,7 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, | |||
475 | if (!sk->sk_send_head) | 475 | if (!sk->sk_send_head) |
476 | sk->sk_send_head = skb; | 476 | sk->sk_send_head = skb; |
477 | if (tp->nonagle & TCP_NAGLE_PUSH) | 477 | if (tp->nonagle & TCP_NAGLE_PUSH) |
478 | tp->nonagle &= ~TCP_NAGLE_PUSH; | 478 | tp->nonagle &= ~TCP_NAGLE_PUSH; |
479 | } | 479 | } |
480 | 480 | ||
481 | static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, | 481 | static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, |
@@ -557,7 +557,7 @@ new_segment: | |||
557 | } | 557 | } |
558 | if (!sk_stream_wmem_schedule(sk, copy)) | 558 | if (!sk_stream_wmem_schedule(sk, copy)) |
559 | goto wait_for_memory; | 559 | goto wait_for_memory; |
560 | 560 | ||
561 | if (can_coalesce) { | 561 | if (can_coalesce) { |
562 | skb_shinfo(skb)->frags[i - 1].size += copy; | 562 | skb_shinfo(skb)->frags[i - 1].size += copy; |
563 | } else { | 563 | } else { |
@@ -1439,12 +1439,12 @@ skip_copy: | |||
1439 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | 1439 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); |
1440 | 1440 | ||
1441 | while (dma_async_memcpy_complete(tp->ucopy.dma_chan, | 1441 | while (dma_async_memcpy_complete(tp->ucopy.dma_chan, |
1442 | tp->ucopy.dma_cookie, &done, | 1442 | tp->ucopy.dma_cookie, &done, |
1443 | &used) == DMA_IN_PROGRESS) { | 1443 | &used) == DMA_IN_PROGRESS) { |
1444 | /* do partial cleanup of sk_async_wait_queue */ | 1444 | /* do partial cleanup of sk_async_wait_queue */ |
1445 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && | 1445 | while ((skb = skb_peek(&sk->sk_async_wait_queue)) && |
1446 | (dma_async_is_complete(skb->dma_cookie, done, | 1446 | (dma_async_is_complete(skb->dma_cookie, done, |
1447 | used) == DMA_SUCCESS)) { | 1447 | used) == DMA_SUCCESS)) { |
1448 | __skb_dequeue(&sk->sk_async_wait_queue); | 1448 | __skb_dequeue(&sk->sk_async_wait_queue); |
1449 | kfree_skb(skb); | 1449 | kfree_skb(skb); |
1450 | } | 1450 | } |
@@ -2006,7 +2006,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
2006 | info->tcpi_options |= TCPI_OPT_WSCALE; | 2006 | info->tcpi_options |= TCPI_OPT_WSCALE; |
2007 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; | 2007 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; |
2008 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; | 2008 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | if (tp->ecn_flags&TCP_ECN_OK) | 2011 | if (tp->ecn_flags&TCP_ECN_OK) |
2012 | info->tcpi_options |= TCPI_OPT_ECN; | 2012 | info->tcpi_options |= TCPI_OPT_ECN; |
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 5ca7723d0798..c1b34f1edb32 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -313,28 +313,28 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight, | |||
313 | return; | 313 | return; |
314 | 314 | ||
315 | /* In "safe" area, increase. */ | 315 | /* In "safe" area, increase. */ |
316 | if (tp->snd_cwnd <= tp->snd_ssthresh) | 316 | if (tp->snd_cwnd <= tp->snd_ssthresh) |
317 | tcp_slow_start(tp); | 317 | tcp_slow_start(tp); |
318 | 318 | ||
319 | /* In dangerous area, increase slowly. */ | 319 | /* In dangerous area, increase slowly. */ |
320 | else if (sysctl_tcp_abc) { | 320 | else if (sysctl_tcp_abc) { |
321 | /* RFC3465: Appropriate Byte Count | 321 | /* RFC3465: Appropriate Byte Count |
322 | * increase once for each full cwnd acked | 322 | * increase once for each full cwnd acked |
323 | */ | 323 | */ |
324 | if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) { | 324 | if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) { |
325 | tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache; | 325 | tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache; |
326 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) | 326 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) |
327 | tp->snd_cwnd++; | 327 | tp->snd_cwnd++; |
328 | } | 328 | } |
329 | } else { | 329 | } else { |
330 | /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */ | 330 | /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */ |
331 | if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { | 331 | if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { |
332 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) | 332 | if (tp->snd_cwnd < tp->snd_cwnd_clamp) |
333 | tp->snd_cwnd++; | 333 | tp->snd_cwnd++; |
334 | tp->snd_cwnd_cnt = 0; | 334 | tp->snd_cwnd_cnt = 0; |
335 | } else | 335 | } else |
336 | tp->snd_cwnd_cnt++; | 336 | tp->snd_cwnd_cnt++; |
337 | } | 337 | } |
338 | } | 338 | } |
339 | EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); | 339 | EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); |
340 | 340 | ||
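The else branch above is the textbook Reno additive increase: snd_cwnd_cnt counts acks, and once a full window's worth has arrived the congestion window grows by one segment, roughly +1 MSS per RTT. A simplified model of that counter, with ABC and the cwnd clamp omitted:

#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 10, snd_cwnd_cnt = 0;
	unsigned int acks;

	for (acks = 0; acks < 35; acks++) {
		if (snd_cwnd_cnt >= snd_cwnd) {
			snd_cwnd++;		/* grew by one full segment */
			snd_cwnd_cnt = 0;
		} else {
			snd_cwnd_cnt++;
		}
	}
	printf("cwnd after 35 acks: %u\n", snd_cwnd);	/* prints 12 */
	return 0;
}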
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 6ad184802266..5ce6cd85680b 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -175,42 +175,42 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
175 | } | 175 | } |
176 | } | 176 | } |
177 | 177 | ||
178 | /* cubic function - calc*/ | 178 | /* cubic function - calc*/ |
179 | /* calculate c * time^3 / rtt, | 179 | /* calculate c * time^3 / rtt, |
180 | * while considering overflow in calculation of time^3 | 180 | * while considering overflow in calculation of time^3 |
181 | * (so time^3 is done by using 64 bit) | 181 | * (so time^3 is done by using 64 bit) |
182 | * and without the support of division of 64bit numbers | 182 | * and without the support of division of 64bit numbers |
183 | * (so all divisions are done by using 32 bit) | 183 | * (so all divisions are done by using 32 bit) |
184 | * also NOTE the unit of those variables | 184 | * also NOTE the unit of those variables |
185 | * time = (t - K) / 2^bictcp_HZ | 185 | * time = (t - K) / 2^bictcp_HZ |
186 | * c = bic_scale >> 10 | 186 | * c = bic_scale >> 10 |
187 | * rtt = (srtt >> 3) / HZ | 187 | * rtt = (srtt >> 3) / HZ |
188 | * !!! The following code does not have overflow problems, | 188 | * !!! The following code does not have overflow problems, |
189 | * if the cwnd < 1 million packets !!! | 189 | * if the cwnd < 1 million packets !!! |
190 | */ | 190 | */ |
191 | 191 | ||
192 | /* change the unit from HZ to bictcp_HZ */ | 192 | /* change the unit from HZ to bictcp_HZ */ |
193 | t = ((tcp_time_stamp + (ca->delay_min>>3) - ca->epoch_start) | 193 | t = ((tcp_time_stamp + (ca->delay_min>>3) - ca->epoch_start) |
194 | << BICTCP_HZ) / HZ; | 194 | << BICTCP_HZ) / HZ; |
195 | 195 | ||
196 | if (t < ca->bic_K) /* t - K */ | 196 | if (t < ca->bic_K) /* t - K */ |
197 | offs = ca->bic_K - t; | 197 | offs = ca->bic_K - t; |
198 | else | 198 | else |
199 | offs = t - ca->bic_K; | 199 | offs = t - ca->bic_K; |
200 | 200 | ||
201 | /* c/rtt * (t-K)^3 */ | 201 | /* c/rtt * (t-K)^3 */ |
202 | delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); | 202 | delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); |
203 | if (t < ca->bic_K) /* below origin*/ | 203 | if (t < ca->bic_K) /* below origin*/ |
204 | bic_target = ca->bic_origin_point - delta; | 204 | bic_target = ca->bic_origin_point - delta; |
205 | else /* above origin*/ | 205 | else /* above origin*/ |
206 | bic_target = ca->bic_origin_point + delta; | 206 | bic_target = ca->bic_origin_point + delta; |
207 | 207 | ||
208 | /* cubic function - calc bictcp_cnt*/ | 208 | /* cubic function - calc bictcp_cnt*/ |
209 | if (bic_target > cwnd) { | 209 | if (bic_target > cwnd) { |
210 | ca->cnt = cwnd / (bic_target - cwnd); | 210 | ca->cnt = cwnd / (bic_target - cwnd); |
211 | } else { | 211 | } else { |
212 | ca->cnt = 100 * cwnd; /* very small increment*/ | 212 | ca->cnt = 100 * cwnd; /* very small increment*/ |
213 | } | 213 | } |
214 | 214 | ||
215 | if (ca->delay_min > 0) { | 215 | if (ca->delay_min > 0) { |
216 | /* max increment = Smax * rtt / 0.1 */ | 216 | /* max increment = Smax * rtt / 0.1 */ |
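All of the shifting above is a fixed-point evaluation of the CUBIC window curve W(t) = C*(t - K)^3 + W_max, where K is the time needed to climb back to the pre-loss window. The floating-point version below shows the same curve; C = 0.4 and beta = 0.8 are the constants from the CUBIC paper and are assumed here purely for illustration:

/* build: cc cubic_curve.c -lm */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double C = 0.4;		/* cubic coefficient (assumed) */
	double beta = 0.8;	/* multiplicative decrease factor (assumed) */
	double wmax = 100.0;	/* cwnd in packets at the last loss */
	double K = cbrt(wmax * (1.0 - beta) / C);	/* time to reach wmax again */
	double t;

	/* concave approach to wmax, then convex probing beyond it */
	for (t = 0.0; t <= 2.0 * K; t += K / 4.0)
		printf("t=%5.2fs  target cwnd=%6.1f\n",
		       t, C * pow(t - K, 3.0) + wmax);
	return 0;
}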
@@ -219,7 +219,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
219 | ca->cnt = min_cnt; | 219 | ca->cnt = min_cnt; |
220 | } | 220 | } |
221 | 221 | ||
222 | /* slow start and low utilization */ | 222 | /* slow start and low utilization */ |
223 | if (ca->loss_cwnd == 0) /* could be aggressive in slow start */ | 223 | if (ca->loss_cwnd == 0) /* could be aggressive in slow start */ |
224 | ca->cnt = 50; | 224 | ca->cnt = 50; |
225 | 225 | ||
@@ -227,9 +227,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
227 | if (tcp_friendliness) { | 227 | if (tcp_friendliness) { |
228 | u32 scale = beta_scale; | 228 | u32 scale = beta_scale; |
229 | delta = (cwnd * scale) >> 3; | 229 | delta = (cwnd * scale) >> 3; |
230 | while (ca->ack_cnt > delta) { /* update tcp cwnd */ | 230 | while (ca->ack_cnt > delta) { /* update tcp cwnd */ |
231 | ca->ack_cnt -= delta; | 231 | ca->ack_cnt -= delta; |
232 | ca->tcp_cwnd++; | 232 | ca->tcp_cwnd++; |
233 | } | 233 | } |
234 | 234 | ||
235 | if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ | 235 | if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ |
@@ -238,7 +238,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
238 | if (ca->cnt > max_cnt) | 238 | if (ca->cnt > max_cnt) |
239 | ca->cnt = max_cnt; | 239 | ca->cnt = max_cnt; |
240 | } | 240 | } |
241 | } | 241 | } |
242 | 242 | ||
243 | ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; | 243 | ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack; |
244 | if (ca->cnt == 0) /* cannot be zero */ | 244 | if (ca->cnt == 0) /* cannot be zero */ |
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index c4fc811bf377..a291097fcc0a 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c | |||
@@ -14,8 +14,8 @@ | |||
14 | * with fixed-point MD scaled <<8. | 14 | * with fixed-point MD scaled <<8. |
15 | */ | 15 | */ |
16 | static const struct hstcp_aimd_val { | 16 | static const struct hstcp_aimd_val { |
17 | unsigned int cwnd; | 17 | unsigned int cwnd; |
18 | unsigned int md; | 18 | unsigned int md; |
19 | } hstcp_aimd_vals[] = { | 19 | } hstcp_aimd_vals[] = { |
20 | { 38, 128, /* 0.50 */ }, | 20 | { 38, 128, /* 0.50 */ }, |
21 | { 118, 112, /* 0.44 */ }, | 21 | { 118, 112, /* 0.44 */ }, |
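The md column is the multiplicative-decrease amount in 1/256 units, so a loss in a given cwnd bracket shrinks the window by cwnd * md >> 8: 128 corresponds to the conventional 0.50 cut, 112 to 0.44. A sketch of how such a value would be applied (an assumed form, not the module's exact ssthresh helper):

#include <stdio.h>

static unsigned int apply_md(unsigned int cwnd, unsigned int md)
{
	unsigned int dec = (cwnd * md) >> 8;	/* md/256 of the window */

	return cwnd > dec + 2 ? cwnd - dec : 2;	/* never below 2 segments */
}

int main(void)
{
	/* md = 128 -> drop 0.50 of cwnd; md = 112 -> drop 0.44 of cwnd */
	printf("%u %u\n", apply_md(200, 128), apply_md(200, 112));
	return 0;
}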
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 753987a1048f..63318b6e9d51 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c | |||
@@ -224,7 +224,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, | |||
224 | if (!tcp_is_cwnd_limited(sk, in_flight)) | 224 | if (!tcp_is_cwnd_limited(sk, in_flight)) |
225 | return; | 225 | return; |
226 | 226 | ||
227 | if (tp->snd_cwnd <= tp->snd_ssthresh) | 227 | if (tp->snd_cwnd <= tp->snd_ssthresh) |
228 | tcp_slow_start(tp); | 228 | tcp_slow_start(tp); |
229 | else { | 229 | else { |
230 | 230 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c6109895bb5e..1a14191687ac 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -50,9 +50,9 @@ | |||
50 | * Andi Kleen: Make sure we never ack data there is not | 50 | * Andi Kleen: Make sure we never ack data there is not |
51 | * enough room for. Also make this condition | 51 | * enough room for. Also make this condition |
52 | * a fatal error if it might still happen. | 52 | * a fatal error if it might still happen. |
53 | * Andi Kleen: Add tcp_measure_rcv_mss to make | 53 | * Andi Kleen: Add tcp_measure_rcv_mss to make |
54 | * connections with MSS<min(MTU,ann. MSS) | 54 | * connections with MSS<min(MTU,ann. MSS) |
55 | * work without delayed acks. | 55 | * work without delayed acks. |
56 | * Andi Kleen: Process packets with PSH set in the | 56 | * Andi Kleen: Process packets with PSH set in the |
57 | * fast path. | 57 | * fast path. |
58 | * J Hadi Salim: ECN support | 58 | * J Hadi Salim: ECN support |
@@ -112,17 +112,17 @@ int sysctl_tcp_abc __read_mostly; | |||
112 | 112 | ||
113 | #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) | 113 | #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) |
114 | 114 | ||
115 | /* Adapt the MSS value used to make delayed ack decision to the | 115 | /* Adapt the MSS value used to make delayed ack decision to the |
116 | * real world. | 116 | * real world. |
117 | */ | 117 | */ |
118 | static void tcp_measure_rcv_mss(struct sock *sk, | 118 | static void tcp_measure_rcv_mss(struct sock *sk, |
119 | const struct sk_buff *skb) | 119 | const struct sk_buff *skb) |
120 | { | 120 | { |
121 | struct inet_connection_sock *icsk = inet_csk(sk); | 121 | struct inet_connection_sock *icsk = inet_csk(sk); |
122 | const unsigned int lss = icsk->icsk_ack.last_seg_size; | 122 | const unsigned int lss = icsk->icsk_ack.last_seg_size; |
123 | unsigned int len; | 123 | unsigned int len; |
124 | 124 | ||
125 | icsk->icsk_ack.last_seg_size = 0; | 125 | icsk->icsk_ack.last_seg_size = 0; |
126 | 126 | ||
127 | /* skb->len may jitter because of SACKs, even if peer | 127 | /* skb->len may jitter because of SACKs, even if peer |
128 | * sends good full-sized frames. | 128 | * sends good full-sized frames. |
@@ -440,15 +440,15 @@ void tcp_rcv_space_adjust(struct sock *sk) | |||
440 | struct tcp_sock *tp = tcp_sk(sk); | 440 | struct tcp_sock *tp = tcp_sk(sk); |
441 | int time; | 441 | int time; |
442 | int space; | 442 | int space; |
443 | 443 | ||
444 | if (tp->rcvq_space.time == 0) | 444 | if (tp->rcvq_space.time == 0) |
445 | goto new_measure; | 445 | goto new_measure; |
446 | 446 | ||
447 | time = tcp_time_stamp - tp->rcvq_space.time; | 447 | time = tcp_time_stamp - tp->rcvq_space.time; |
448 | if (time < (tp->rcv_rtt_est.rtt >> 3) || | 448 | if (time < (tp->rcv_rtt_est.rtt >> 3) || |
449 | tp->rcv_rtt_est.rtt == 0) | 449 | tp->rcv_rtt_est.rtt == 0) |
450 | return; | 450 | return; |
451 | 451 | ||
452 | space = 2 * (tp->copied_seq - tp->rcvq_space.seq); | 452 | space = 2 * (tp->copied_seq - tp->rcvq_space.seq); |
453 | 453 | ||
454 | space = max(tp->rcvq_space.space, space); | 454 | space = max(tp->rcvq_space.space, space); |
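The sizing rule here is receiver-side auto-tuning: whatever the application consumed since the last measurement (one RTT ago) is doubled to leave headroom for the sender to keep growing. A toy calculation of that rule, with the overhead factors omitted:

#include <stdio.h>

int main(void)
{
	unsigned int copied_seq = 3000000, rcvq_space_seq = 1750000;
	unsigned int space = 2 * (copied_seq - rcvq_space_seq);

	/* 1.25 MB drained in the last RTT -> suggest 2.5 MB of buffer */
	printf("suggested rcvbuf: %u bytes\n", space);
	return 0;
}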
@@ -483,7 +483,7 @@ void tcp_rcv_space_adjust(struct sock *sk) | |||
483 | } | 483 | } |
484 | } | 484 | } |
485 | } | 485 | } |
486 | 486 | ||
487 | new_measure: | 487 | new_measure: |
488 | tp->rcvq_space.seq = tp->copied_seq; | 488 | tp->rcvq_space.seq = tp->copied_seq; |
489 | tp->rcvq_space.time = tcp_time_stamp; | 489 | tp->rcvq_space.time = tcp_time_stamp; |
@@ -509,7 +509,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_ | |||
509 | tcp_measure_rcv_mss(sk, skb); | 509 | tcp_measure_rcv_mss(sk, skb); |
510 | 510 | ||
511 | tcp_rcv_rtt_measure(tp); | 511 | tcp_rcv_rtt_measure(tp); |
512 | 512 | ||
513 | now = tcp_time_stamp; | 513 | now = tcp_time_stamp; |
514 | 514 | ||
515 | if (!icsk->icsk_ack.ato) { | 515 | if (!icsk->icsk_ack.ato) { |
@@ -561,7 +561,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) | |||
561 | /* The following amusing code comes from Jacobson's | 561 | /* The following amusing code comes from Jacobson's |
562 | * article in SIGCOMM '88. Note that rtt and mdev | 562 | * article in SIGCOMM '88. Note that rtt and mdev |
563 | * are scaled versions of rtt and mean deviation. | 563 | * are scaled versions of rtt and mean deviation. |
564 | * This is designed to be as fast as possible | 564 | * This is designed to be as fast as possible |
565 | * m stands for "measurement". | 565 | * m stands for "measurement". |
566 | * | 566 | * |
567 | * On a 1990 paper the rto value is changed to: | 567 | * On a 1990 paper the rto value is changed to: |
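Unscaled, the SIGCOMM '88 estimator referenced here is srtt += (m - srtt)/8 and mdev += (|m - srtt| - mdev)/4, with the retransmit timer set near srtt + 4*mdev; the kernel keeps srtt in 1/8 and mdev in 1/4 fixed-point units. The plain floating-point walk-through below is only illustrative:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double srtt = 100.0, mdev = 25.0;		/* ms, prior state */
	double samples[] = { 110.0, 90.0, 300.0, 105.0 };	/* measured RTTs */
	int i;

	for (i = 0; i < 4; i++) {
		double m = samples[i];
		double err = m - srtt;

		srtt += err / 8.0;			/* smoothed RTT */
		mdev += (fabs(err) - mdev) / 4.0;	/* mean deviation */
		printf("m=%3.0f  srtt=%6.1f  mdev=%5.1f  rto=%6.1f\n",
		       m, srtt, mdev, srtt + 4.0 * mdev);
	}
	return 0;
}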
@@ -1249,8 +1249,8 @@ void tcp_enter_frto(struct sock *sk) | |||
1249 | tp->frto_counter = 1; | 1249 | tp->frto_counter = 1; |
1250 | 1250 | ||
1251 | if (icsk->icsk_ca_state <= TCP_CA_Disorder || | 1251 | if (icsk->icsk_ca_state <= TCP_CA_Disorder || |
1252 | tp->snd_una == tp->high_seq || | 1252 | tp->snd_una == tp->high_seq || |
1253 | (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { | 1253 | (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { |
1254 | tp->prior_ssthresh = tcp_current_ssthresh(sk); | 1254 | tp->prior_ssthresh = tcp_current_ssthresh(sk); |
1255 | tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); | 1255 | tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); |
1256 | tcp_ca_event(sk, CA_EVENT_FRTO); | 1256 | tcp_ca_event(sk, CA_EVENT_FRTO); |
@@ -1969,11 +1969,11 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, | |||
1969 | * 1. Reno does not count dupacks (sacked_out) automatically. */ | 1969 | * 1. Reno does not count dupacks (sacked_out) automatically. */ |
1970 | if (!tp->packets_out) | 1970 | if (!tp->packets_out) |
1971 | tp->sacked_out = 0; | 1971 | tp->sacked_out = 0; |
1972 | /* 2. SACK counts snd_fack in packets inaccurately. */ | 1972 | /* 2. SACK counts snd_fack in packets inaccurately. */ |
1973 | if (tp->sacked_out == 0) | 1973 | if (tp->sacked_out == 0) |
1974 | tp->fackets_out = 0; | 1974 | tp->fackets_out = 0; |
1975 | 1975 | ||
1976 | /* Now state machine starts. | 1976 | /* Now state machine starts. |
1977 | * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ | 1977 | * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ |
1978 | if (flag&FLAG_ECE) | 1978 | if (flag&FLAG_ECE) |
1979 | tp->prior_ssthresh = 0; | 1979 | tp->prior_ssthresh = 0; |
@@ -2203,7 +2203,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, | |||
2203 | __u32 now, __s32 *seq_rtt) | 2203 | __u32 now, __s32 *seq_rtt) |
2204 | { | 2204 | { |
2205 | struct tcp_sock *tp = tcp_sk(sk); | 2205 | struct tcp_sock *tp = tcp_sk(sk); |
2206 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); | 2206 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); |
2207 | __u32 seq = tp->snd_una; | 2207 | __u32 seq = tp->snd_una; |
2208 | __u32 packets_acked; | 2208 | __u32 packets_acked; |
2209 | int acked = 0; | 2209 | int acked = 0; |
@@ -2279,7 +2279,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) | |||
2279 | 2279 | ||
2280 | while ((skb = skb_peek(&sk->sk_write_queue)) && | 2280 | while ((skb = skb_peek(&sk->sk_write_queue)) && |
2281 | skb != sk->sk_send_head) { | 2281 | skb != sk->sk_send_head) { |
2282 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); | 2282 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); |
2283 | __u8 sacked = scb->sacked; | 2283 | __u8 sacked = scb->sacked; |
2284 | 2284 | ||
2285 | /* If our packet is before the ack sequence we can | 2285 | /* If our packet is before the ack sequence we can |
@@ -2470,9 +2470,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, | |||
2470 | static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) | 2470 | static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) |
2471 | { | 2471 | { |
2472 | struct tcp_sock *tp = tcp_sk(sk); | 2472 | struct tcp_sock *tp = tcp_sk(sk); |
2473 | 2473 | ||
2474 | tcp_sync_left_out(tp); | 2474 | tcp_sync_left_out(tp); |
2475 | 2475 | ||
2476 | if (tp->snd_una == prior_snd_una || | 2476 | if (tp->snd_una == prior_snd_una || |
2477 | !before(tp->snd_una, tp->frto_highmark)) { | 2477 | !before(tp->snd_una, tp->frto_highmark)) { |
2478 | /* RTO was caused by loss, start retransmitting in | 2478 | /* RTO was caused by loss, start retransmitting in |
@@ -2627,7 +2627,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
2627 | opt_rx->saw_tstamp = 0; | 2627 | opt_rx->saw_tstamp = 0; |
2628 | 2628 | ||
2629 | while(length>0) { | 2629 | while(length>0) { |
2630 | int opcode=*ptr++; | 2630 | int opcode=*ptr++; |
2631 | int opsize; | 2631 | int opsize; |
2632 | 2632 | ||
2633 | switch (opcode) { | 2633 | switch (opcode) { |
@@ -2642,7 +2642,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
2642 | return; | 2642 | return; |
2643 | if (opsize > length) | 2643 | if (opsize > length) |
2644 | return; /* don't parse partial options */ | 2644 | return; /* don't parse partial options */ |
2645 | switch(opcode) { | 2645 | switch(opcode) { |
2646 | case TCPOPT_MSS: | 2646 | case TCPOPT_MSS: |
2647 | if(opsize==TCPOLEN_MSS && th->syn && !estab) { | 2647 | if(opsize==TCPOLEN_MSS && th->syn && !estab) { |
2648 | u16 in_mss = ntohs(get_unaligned((__be16 *)ptr)); | 2648 | u16 in_mss = ntohs(get_unaligned((__be16 *)ptr)); |
@@ -2701,10 +2701,10 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
2701 | */ | 2701 | */ |
2702 | break; | 2702 | break; |
2703 | #endif | 2703 | #endif |
2704 | }; | 2704 | }; |
2705 | ptr+=opsize-2; | 2705 | ptr+=opsize-2; |
2706 | length-=opsize; | 2706 | length-=opsize; |
2707 | }; | 2707 | }; |
2708 | } | 2708 | } |
2709 | } | 2709 | } |
2710 | 2710 | ||
@@ -3263,7 +3263,7 @@ drop: | |||
3263 | TCP_SKB_CB(skb)->end_seq); | 3263 | TCP_SKB_CB(skb)->end_seq); |
3264 | 3264 | ||
3265 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); | 3265 | tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); |
3266 | 3266 | ||
3267 | /* If window is closed, drop tail of packet. But after | 3267 | /* If window is closed, drop tail of packet. But after |
3268 | * remembering D-SACK for its head made in previous line. | 3268 | * remembering D-SACK for its head made in previous line. |
3269 | */ | 3269 | */ |
@@ -3342,7 +3342,7 @@ drop: | |||
3342 | } | 3342 | } |
3343 | } | 3343 | } |
3344 | __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); | 3344 | __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); |
3345 | 3345 | ||
3346 | /* And clean segments covered by new one as whole. */ | 3346 | /* And clean segments covered by new one as whole. */ |
3347 | while ((skb1 = skb->next) != | 3347 | while ((skb1 = skb->next) != |
3348 | (struct sk_buff*)&tp->out_of_order_queue && | 3348 | (struct sk_buff*)&tp->out_of_order_queue && |
@@ -3507,7 +3507,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk) | |||
3507 | */ | 3507 | */ |
3508 | static int tcp_prune_queue(struct sock *sk) | 3508 | static int tcp_prune_queue(struct sock *sk) |
3509 | { | 3509 | { |
3510 | struct tcp_sock *tp = tcp_sk(sk); | 3510 | struct tcp_sock *tp = tcp_sk(sk); |
3511 | 3511 | ||
3512 | SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); | 3512 | SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); |
3513 | 3513 | ||
@@ -3617,7 +3617,7 @@ static void tcp_new_space(struct sock *sk) | |||
3617 | struct tcp_sock *tp = tcp_sk(sk); | 3617 | struct tcp_sock *tp = tcp_sk(sk); |
3618 | 3618 | ||
3619 | if (tcp_should_expand_sndbuf(sk, tp)) { | 3619 | if (tcp_should_expand_sndbuf(sk, tp)) { |
3620 | int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + | 3620 | int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + |
3621 | MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), | 3621 | MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), |
3622 | demanded = max_t(unsigned int, tp->snd_cwnd, | 3622 | demanded = max_t(unsigned int, tp->snd_cwnd, |
3623 | tp->reordering + 1); | 3623 | tp->reordering + 1); |
@@ -3690,7 +3690,7 @@ static inline void tcp_ack_snd_check(struct sock *sk) | |||
3690 | * For 1003.1g we should support a new option TCP_STDURG to permit | 3690 | * For 1003.1g we should support a new option TCP_STDURG to permit |
3691 | * either form (or just set the sysctl tcp_stdurg). | 3691 | * either form (or just set the sysctl tcp_stdurg). |
3692 | */ | 3692 | */ |
3693 | 3693 | ||
3694 | static void tcp_check_urg(struct sock * sk, struct tcphdr * th) | 3694 | static void tcp_check_urg(struct sock * sk, struct tcphdr * th) |
3695 | { | 3695 | { |
3696 | struct tcp_sock *tp = tcp_sk(sk); | 3696 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -3771,7 +3771,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) | |||
3771 | u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - | 3771 | u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - |
3772 | th->syn; | 3772 | th->syn; |
3773 | 3773 | ||
3774 | /* Is the urgent pointer pointing into this packet? */ | 3774 | /* Is the urgent pointer pointing into this packet? */ |
3775 | if (ptr < skb->len) { | 3775 | if (ptr < skb->len) { |
3776 | u8 tmp; | 3776 | u8 tmp; |
3777 | if (skb_copy_bits(skb, ptr, &tmp, 1)) | 3777 | if (skb_copy_bits(skb, ptr, &tmp, 1)) |
@@ -3835,7 +3835,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen | |||
3835 | int copied_early = 0; | 3835 | int copied_early = 0; |
3836 | 3836 | ||
3837 | if (tp->ucopy.wakeup) | 3837 | if (tp->ucopy.wakeup) |
3838 | return 0; | 3838 | return 0; |
3839 | 3839 | ||
3840 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | 3840 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
3841 | tp->ucopy.dma_chan = get_softnet_dma(); | 3841 | tp->ucopy.dma_chan = get_softnet_dma(); |
@@ -3871,26 +3871,26 @@ out: | |||
3871 | #endif /* CONFIG_NET_DMA */ | 3871 | #endif /* CONFIG_NET_DMA */ |
3872 | 3872 | ||
3873 | /* | 3873 | /* |
3874 | * TCP receive function for the ESTABLISHED state. | 3874 | * TCP receive function for the ESTABLISHED state. |
3875 | * | 3875 | * |
3876 | * It is split into a fast path and a slow path. The fast path is | 3876 | * It is split into a fast path and a slow path. The fast path is |
3877 | * disabled when: | 3877 | * disabled when: |
3878 | * - A zero window was announced from us - zero window probing | 3878 | * - A zero window was announced from us - zero window probing |
3879 | * is only handled properly in the slow path. | 3879 | * is only handled properly in the slow path. |
3880 | * - Out of order segments arrived. | 3880 | * - Out of order segments arrived. |
3881 | * - Urgent data is expected. | 3881 | * - Urgent data is expected. |
3882 | * - There is no buffer space left | 3882 | * - There is no buffer space left |
3883 | * - Unexpected TCP flags/window values/header lengths are received | 3883 | * - Unexpected TCP flags/window values/header lengths are received |
3884 | * (detected by checking the TCP header against pred_flags) | 3884 | * (detected by checking the TCP header against pred_flags) |
3885 | * - Data is sent in both directions. Fast path only supports pure senders | 3885 | * - Data is sent in both directions. Fast path only supports pure senders |
3886 | * or pure receivers (this means either the sequence number or the ack | 3886 | * or pure receivers (this means either the sequence number or the ack |
3887 | * value must stay constant) | 3887 | * value must stay constant) |
3888 | * - Unexpected TCP option. | 3888 | * - Unexpected TCP option. |
3889 | * | 3889 | * |
3890 | * When these conditions are not satisfied it drops into a standard | 3890 | * When these conditions are not satisfied it drops into a standard |
3891 | * receive procedure patterned after RFC793 to handle all cases. | 3891 | * receive procedure patterned after RFC793 to handle all cases. |
3892 | * The first three cases are guaranteed by proper pred_flags setting, | 3892 | * The first three cases are guaranteed by proper pred_flags setting, |
3893 | * the rest is checked inline. Fast processing is turned on in | 3893 | * the rest is checked inline. Fast processing is turned on in |
3894 | * tcp_data_queue when everything is OK. | 3894 | * tcp_data_queue when everything is OK. |
3895 | */ | 3895 | */ |
3896 | int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | 3896 | int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, |
@@ -3900,15 +3900,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3900 | 3900 | ||
3901 | /* | 3901 | /* |
3902 | * Header prediction. | 3902 | * Header prediction. |
3903 | * The code loosely follows the one in the famous | 3903 | * The code loosely follows the one in the famous |
3904 | * "30 instruction TCP receive" Van Jacobson mail. | 3904 | * "30 instruction TCP receive" Van Jacobson mail. |
3905 | * | 3905 | * |
3906 | * Van's trick is to deposit buffers into socket queue | 3906 | * Van's trick is to deposit buffers into socket queue |
3907 | * on a device interrupt, to call tcp_recv function | 3907 | * on a device interrupt, to call tcp_recv function |
3908 | * on the receive process context and checksum and copy | 3908 | * on the receive process context and checksum and copy |
3909 | * the buffer to user space. smart... | 3909 | * the buffer to user space. smart... |
3910 | * | 3910 | * |
3911 | * Our current scheme is not silly either but we take the | 3911 | * Our current scheme is not silly either but we take the |
3912 | * extra cost of the net_bh soft interrupt processing... | 3912 | * extra cost of the net_bh soft interrupt processing... |
3913 | * We do checksum and copy also but from device to kernel. | 3913 | * We do checksum and copy also but from device to kernel. |
3914 | */ | 3914 | */ |
@@ -3919,7 +3919,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3919 | * if header_prediction is to be made | 3919 | * if header_prediction is to be made |
3920 | * 'S' will always be tp->tcp_header_len >> 2 | 3920 | * 'S' will always be tp->tcp_header_len >> 2 |
3921 | * '?' will be 0 for the fast path, otherwise pred_flags is 0 to | 3921 | * '?' will be 0 for the fast path, otherwise pred_flags is 0 to |
3922 | * turn it off (when there are holes in the receive | 3922 | * turn it off (when there are holes in the receive |
3923 | * space for instance) | 3923 | * space for instance) |
3924 | * PSH flag is ignored. | 3924 | * PSH flag is ignored. |
3925 | */ | 3925 | */ |
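The 'S' and '?' placeholders refer to the 32-bit prediction word: the expected data offset in the top nibble, an ACK-only flag byte, and the window the peer is expected to keep advertising, all compared in one go against the same word of each arriving header. The packing below is a sketch that follows this comment; treat the exact layout as an assumption:

#include <stdint.h>
#include <stdio.h>

#define TCP_FLAG_ACK 0x10	/* ACK bit within the flags byte */

/* 'S' = tcp_header_len >> 2 in the top nibble, then the ACK-only flag
 * byte, then the 16-bit window the peer should keep advertising */
static uint32_t build_pred_flags(unsigned int tcp_header_len, uint16_t snd_wnd)
{
	return ((uint32_t)(tcp_header_len >> 2) << 28) |
	       ((uint32_t)TCP_FLAG_ACK << 16) |
	       snd_wnd;
}

/* any mismatch (extra flags, changed window, unexpected header length)
 * fails the comparison and pushes the segment onto the slow path */
static int header_is_predicted(uint32_t hdr_word, uint32_t pred_flags)
{
	return hdr_word == pred_flags;
}

int main(void)
{
	uint32_t pred = build_pred_flags(20, 0x7fff);

	printf("pred_flags=%08x predicted=%d\n",
	       pred, header_is_predicted(pred, pred));
	return 0;
}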
@@ -3943,7 +3943,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3943 | goto slow_path; | 3943 | goto slow_path; |
3944 | 3944 | ||
3945 | tp->rx_opt.saw_tstamp = 1; | 3945 | tp->rx_opt.saw_tstamp = 1; |
3946 | ++ptr; | 3946 | ++ptr; |
3947 | tp->rx_opt.rcv_tsval = ntohl(*ptr); | 3947 | tp->rx_opt.rcv_tsval = ntohl(*ptr); |
3948 | ++ptr; | 3948 | ++ptr; |
3949 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); | 3949 | tp->rx_opt.rcv_tsecr = ntohl(*ptr); |
@@ -3975,7 +3975,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
3975 | * on entry. | 3975 | * on entry. |
3976 | */ | 3976 | */ |
3977 | tcp_ack(sk, skb, 0); | 3977 | tcp_ack(sk, skb, 0); |
3978 | __kfree_skb(skb); | 3978 | __kfree_skb(skb); |
3979 | tcp_data_snd_check(sk, tp); | 3979 | tcp_data_snd_check(sk, tp); |
3980 | return 0; | 3980 | return 0; |
3981 | } else { /* Header too small */ | 3981 | } else { /* Header too small */ |
@@ -4393,11 +4393,11 @@ reset_and_undo: | |||
4393 | 4393 | ||
4394 | /* | 4394 | /* |
4395 | * This function implements the receiving procedure of RFC 793 for | 4395 | * This function implements the receiving procedure of RFC 793 for |
4396 | * all states except ESTABLISHED and TIME_WAIT. | 4396 | * all states except ESTABLISHED and TIME_WAIT. |
4397 | * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be | 4397 | * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be |
4398 | * address independent. | 4398 | * address independent. |
4399 | */ | 4399 | */ |
4400 | 4400 | ||
4401 | int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | 4401 | int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
4402 | struct tcphdr *th, unsigned len) | 4402 | struct tcphdr *th, unsigned len) |
4403 | { | 4403 | { |
@@ -4422,19 +4422,19 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4422 | if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) | 4422 | if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) |
4423 | return 1; | 4423 | return 1; |
4424 | 4424 | ||
4425 | /* Now we have several options: In theory there is | 4425 | /* Now we have several options: In theory there is |
4426 | * nothing else in the frame. KA9Q has an option to | 4426 | * nothing else in the frame. KA9Q has an option to |
4427 | * send data with the syn, BSD accepts data with the | 4427 | * send data with the syn, BSD accepts data with the |
4428 | * syn up to the [to be] advertised window and | 4428 | * syn up to the [to be] advertised window and |
4429 | * Solaris 2.1 gives you a protocol error. For now | 4429 | * Solaris 2.1 gives you a protocol error. For now |
4430 | * we just ignore it, that fits the spec precisely | 4430 | * we just ignore it, that fits the spec precisely |
4431 | * and avoids incompatibilities. It would be nice in | 4431 | * and avoids incompatibilities. It would be nice in |
4432 | * future to drop through and process the data. | 4432 | * future to drop through and process the data. |
4433 | * | 4433 | * |
4434 | * Now that TTCP is starting to be used we ought to | 4434 | * Now that TTCP is starting to be used we ought to |
4435 | * queue this data. | 4435 | * queue this data. |
4436 | * But, this leaves one open to an easy denial of | 4436 | * But, this leaves one open to an easy denial of |
4437 | * service attack, and SYN cookies can't defend | 4437 | * service attack, and SYN cookies can't defend |
4438 | * against this problem. So, we drop the data | 4438 | * against this problem. So, we drop the data |
4439 | * in the interest of security over speed unless | 4439 | * in the interest of security over speed unless |
4440 | * it's still in use. | 4440 | * it's still in use. |
@@ -4624,7 +4624,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4624 | case TCP_FIN_WAIT1: | 4624 | case TCP_FIN_WAIT1: |
4625 | case TCP_FIN_WAIT2: | 4625 | case TCP_FIN_WAIT2: |
4626 | /* RFC 793 says to queue data in these states, | 4626 | /* RFC 793 says to queue data in these states, |
4627 | * RFC 1122 says we MUST send a reset. | 4627 | * RFC 1122 says we MUST send a reset. |
4628 | * BSD 4.4 also does reset. | 4628 | * BSD 4.4 also does reset. |
4629 | */ | 4629 | */ |
4630 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 4630 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
@@ -4636,7 +4636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4636 | } | 4636 | } |
4637 | } | 4637 | } |
4638 | /* Fall through */ | 4638 | /* Fall through */ |
4639 | case TCP_ESTABLISHED: | 4639 | case TCP_ESTABLISHED: |
4640 | tcp_data_queue(sk, skb); | 4640 | tcp_data_queue(sk, skb); |
4641 | queued = 1; | 4641 | queued = 1; |
4642 | break; | 4642 | break; |
@@ -4648,7 +4648,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
4648 | tcp_ack_snd_check(sk); | 4648 | tcp_ack_snd_check(sk); |
4649 | } | 4649 | } |
4650 | 4650 | ||
4651 | if (!queued) { | 4651 | if (!queued) { |
4652 | discard: | 4652 | discard: |
4653 | __kfree_skb(skb); | 4653 | __kfree_skb(skb); |
4654 | } | 4654 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f51d6404c61c..0ba74bbe7d30 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -303,7 +303,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) | |||
303 | /* We don't check in the destentry if pmtu discovery is forbidden | 303 | /* We don't check in the destentry if pmtu discovery is forbidden |
304 | * on this route. We just assume that no packet_to_big packets | 304 | * on this route. We just assume that no packet_to_big packets |
305 | * are send back when pmtu discovery is not active. | 305 | * are send back when pmtu discovery is not active. |
306 | * There is a small race when the user changes this flag in the | 306 | * There is a small race when the user changes this flag in the |
307 | * route, but I think that's acceptable. | 307 | * route, but I think that's acceptable. |
308 | */ | 308 | */ |
309 | if ((dst = __sk_dst_check(sk, 0)) == NULL) | 309 | if ((dst = __sk_dst_check(sk, 0)) == NULL) |
@@ -880,7 +880,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
880 | 880 | ||
881 | if (md5sig->alloced4 == md5sig->entries4) { | 881 | if (md5sig->alloced4 == md5sig->entries4) { |
882 | keys = kmalloc((sizeof(*keys) * | 882 | keys = kmalloc((sizeof(*keys) * |
883 | (md5sig->entries4 + 1)), GFP_ATOMIC); | 883 | (md5sig->entries4 + 1)), GFP_ATOMIC); |
884 | if (!keys) { | 884 | if (!keys) { |
885 | kfree(newkey); | 885 | kfree(newkey); |
886 | tcp_free_md5sig_pool(); | 886 | tcp_free_md5sig_pool(); |
@@ -934,7 +934,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | |||
934 | memcpy(&tp->md5sig_info->keys4[i], | 934 | memcpy(&tp->md5sig_info->keys4[i], |
935 | &tp->md5sig_info->keys4[i+1], | 935 | &tp->md5sig_info->keys4[i+1], |
936 | (tp->md5sig_info->entries4 - i) * | 936 | (tp->md5sig_info->entries4 - i) * |
937 | sizeof(struct tcp4_md5sig_key)); | 937 | sizeof(struct tcp4_md5sig_key)); |
938 | } | 938 | } |
939 | tcp_free_md5sig_pool(); | 939 | tcp_free_md5sig_pool(); |
940 | return 0; | 940 | return 0; |
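The deletion hunk above closes the gap in the keys4[] array by copying the tail down over the removed slot. A generic standalone version of the same idea is safest with memmove, since source and destination overlap; a sketch under that assumption, using a generic element type rather than the kernel structures:

#include <stddef.h>
#include <string.h>

/* Remove element i from an array of n elements of size elem_size by
 * sliding the tail down one slot. Returns the new element count. */
static size_t array_remove(void *base, size_t n, size_t elem_size, size_t i)
{
    unsigned char *p = base;

    if (i >= n)
        return n;
    memmove(p + i * elem_size,          /* destination: the freed slot */
            p + (i + 1) * elem_size,    /* source: tail of the array   */
            (n - 1 - i) * elem_size);   /* bytes in the tail           */
    return n - 1;
}

Usage would be along the lines of nkeys = array_remove(keys, nkeys, sizeof(keys[0]), idx).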
@@ -1388,7 +1388,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1388 | goto drop_and_free; | 1388 | goto drop_and_free; |
1389 | 1389 | ||
1390 | if (want_cookie) { | 1390 | if (want_cookie) { |
1391 | reqsk_free(req); | 1391 | reqsk_free(req); |
1392 | } else { | 1392 | } else { |
1393 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); | 1393 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
1394 | } | 1394 | } |
@@ -1704,7 +1704,7 @@ bad_packet: | |||
1704 | discard_it: | 1704 | discard_it: |
1705 | /* Discard frame. */ | 1705 | /* Discard frame. */ |
1706 | kfree_skb(skb); | 1706 | kfree_skb(skb); |
1707 | return 0; | 1707 | return 0; |
1708 | 1708 | ||
1709 | discard_and_relse: | 1709 | discard_and_relse: |
1710 | sock_put(sk); | 1710 | sock_put(sk); |
@@ -1890,10 +1890,10 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1890 | tcp_cleanup_congestion_control(sk); | 1890 | tcp_cleanup_congestion_control(sk); |
1891 | 1891 | ||
1892 | /* Cleanup up the write buffer. */ | 1892 | /* Cleanup up the write buffer. */ |
1893 | sk_stream_writequeue_purge(sk); | 1893 | sk_stream_writequeue_purge(sk); |
1894 | 1894 | ||
1895 | /* Cleans up our, hopefully empty, out_of_order_queue. */ | 1895 | /* Cleans up our, hopefully empty, out_of_order_queue. */ |
1896 | __skb_queue_purge(&tp->out_of_order_queue); | 1896 | __skb_queue_purge(&tp->out_of_order_queue); |
1897 | 1897 | ||
1898 | #ifdef CONFIG_TCP_MD5SIG | 1898 | #ifdef CONFIG_TCP_MD5SIG |
1899 | /* Clean up the MD5 key list, if any */ | 1899 | /* Clean up the MD5 key list, if any */ |
@@ -1906,7 +1906,7 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1906 | 1906 | ||
1907 | #ifdef CONFIG_NET_DMA | 1907 | #ifdef CONFIG_NET_DMA |
1908 | /* Cleans up our sk_async_wait_queue */ | 1908 | /* Cleans up our sk_async_wait_queue */ |
1909 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1909 | __skb_queue_purge(&sk->sk_async_wait_queue); |
1910 | #endif | 1910 | #endif |
1911 | 1911 | ||
1912 | /* Clean prequeue, it must be empty really */ | 1912 | /* Clean prequeue, it must be empty really */ |
@@ -1983,7 +1983,7 @@ get_req: | |||
1983 | st->state = TCP_SEQ_STATE_LISTENING; | 1983 | st->state = TCP_SEQ_STATE_LISTENING; |
1984 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 1984 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1985 | } else { | 1985 | } else { |
1986 | icsk = inet_csk(sk); | 1986 | icsk = inet_csk(sk); |
1987 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 1987 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1988 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) | 1988 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) |
1989 | goto start_req; | 1989 | goto start_req; |
@@ -1996,7 +1996,7 @@ get_sk: | |||
1996 | cur = sk; | 1996 | cur = sk; |
1997 | goto out; | 1997 | goto out; |
1998 | } | 1998 | } |
1999 | icsk = inet_csk(sk); | 1999 | icsk = inet_csk(sk); |
2000 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 2000 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
2001 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) { | 2001 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) { |
2002 | start_req: | 2002 | start_req: |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 4a3889dd1943..30b1e520ad94 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -64,7 +64,7 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) | |||
64 | return (seq == e_win && seq == end_seq); | 64 | return (seq == e_win && seq == end_seq); |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * * Main purpose of TIME-WAIT state is to close connection gracefully, | 68 | * * Main purpose of TIME-WAIT state is to close connection gracefully, |
69 | * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN | 69 | * when one of ends sits in LAST-ACK or CLOSING retransmitting FIN |
70 | * (and, probably, tail of data) and one or more our ACKs are lost. | 70 | * (and, probably, tail of data) and one or more our ACKs are lost. |
@@ -176,13 +176,13 @@ kill_with_rst: | |||
176 | * "When a connection is [...] on TIME-WAIT state [...] | 176 | * "When a connection is [...] on TIME-WAIT state [...] |
177 | * [a TCP] MAY accept a new SYN from the remote TCP to | 177 | * [a TCP] MAY accept a new SYN from the remote TCP to |
178 | * reopen the connection directly, if it: | 178 | * reopen the connection directly, if it: |
179 | * | 179 | * |
180 | * (1) assigns its initial sequence number for the new | 180 | * (1) assigns its initial sequence number for the new |
181 | * connection to be larger than the largest sequence | 181 | * connection to be larger than the largest sequence |
182 | * number it used on the previous connection incarnation, | 182 | * number it used on the previous connection incarnation, |
183 | * and | 183 | * and |
184 | * | 184 | * |
185 | * (2) returns to TIME-WAIT state if the SYN turns out | 185 | * (2) returns to TIME-WAIT state if the SYN turns out |
186 | * to be an old duplicate". | 186 | * to be an old duplicate". |
187 | */ | 187 | */ |
188 | 188 | ||
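The RFC 1122 passage quoted above turns on one wrap-safe comparison: a SYN arriving in TIME-WAIT may reopen the connection only if its sequence number lies beyond everything the old incarnation used. A small sketch of that check; the helper names are assumptions for illustration, not the kernel functions:

#include <stdint.h>

/* Wrap-safe "s1 is after s2" for 32-bit TCP sequence numbers. */
static int seq_after(uint32_t s1, uint32_t s2)
{
    return (int32_t)(s1 - s2) > 0;
}

/* Accept a new SYN in TIME-WAIT only if its ISN is past the last
 * sequence number the previous incarnation received (tw_rcv_nxt). */
static int timewait_may_accept_syn(uint32_t syn_seq, uint32_t tw_rcv_nxt)
{
    return seq_after(syn_seq, tw_rcv_nxt);
}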
@@ -266,9 +266,9 @@ kill: | |||
266 | return TCP_TW_SUCCESS; | 266 | return TCP_TW_SUCCESS; |
267 | } | 267 | } |
268 | 268 | ||
269 | /* | 269 | /* |
270 | * Move a socket to time-wait or dead fin-wait-2 state. | 270 | * Move a socket to time-wait or dead fin-wait-2 state. |
271 | */ | 271 | */ |
272 | void tcp_time_wait(struct sock *sk, int state, int timeo) | 272 | void tcp_time_wait(struct sock *sk, int state, int timeo) |
273 | { | 273 | { |
274 | struct inet_timewait_sock *tw = NULL; | 274 | struct inet_timewait_sock *tw = NULL; |
@@ -481,7 +481,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
481 | return newsk; | 481 | return newsk; |
482 | } | 482 | } |
483 | 483 | ||
484 | /* | 484 | /* |
485 | * Process an incoming packet for SYN_RECV sockets represented | 485 | * Process an incoming packet for SYN_RECV sockets represented |
486 | * as a request_sock. | 486 | * as a request_sock. |
487 | */ | 487 | */ |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 58b7111523f4..cebe9aa918a3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -198,7 +198,7 @@ void tcp_select_initial_window(int __space, __u32 mss, | |||
198 | (*rcv_wscale) = 0; | 198 | (*rcv_wscale) = 0; |
199 | if (wscale_ok) { | 199 | if (wscale_ok) { |
200 | /* Set window scaling on max possible window | 200 | /* Set window scaling on max possible window |
201 | * See RFC1323 for an explanation of the limit to 14 | 201 | * See RFC1323 for an explanation of the limit to 14 |
202 | */ | 202 | */ |
203 | space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); | 203 | space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); |
204 | space = min_t(u32, space, *window_clamp); | 204 | space = min_t(u32, space, *window_clamp); |
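The loop that follows the comment above picks the receive window scale so that the largest buffer the socket could ever advertise still fits in the 16-bit window field, with the shift capped at 14 as RFC 1323 requires. A sketch of that selection, assuming space is the maximum receive buffer in bytes:

#include <stdint.h>

/* Raise the advertised-window scale until the largest possible window
 * fits in 16 bits, never exceeding the RFC 1323 cap of 14. */
static uint8_t select_rcv_wscale(uint32_t space)
{
    uint8_t wscale = 0;

    while (space > 65535u && wscale < 14) {
        space >>= 1;
        wscale++;
    }
    return wscale;
}

For a 4 MB buffer this yields 7, the smallest shift that brings the scaled window at or below 65535.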
@@ -451,7 +451,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
451 | (tp->rx_opt.eff_sacks * | 451 | (tp->rx_opt.eff_sacks * |
452 | TCPOLEN_SACK_PERBLOCK)); | 452 | TCPOLEN_SACK_PERBLOCK)); |
453 | } | 453 | } |
454 | 454 | ||
455 | if (tcp_packets_in_flight(tp) == 0) | 455 | if (tcp_packets_in_flight(tp) == 0) |
456 | tcp_ca_event(sk, CA_EVENT_TX_START); | 456 | tcp_ca_event(sk, CA_EVENT_TX_START); |
457 | 457 | ||
@@ -555,7 +555,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
555 | } | 555 | } |
556 | 556 | ||
557 | 557 | ||
558 | /* This routine just queue's the buffer | 558 | /* This routine just queue's the buffer |
559 | * | 559 | * |
560 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, | 560 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, |
561 | * otherwise socket can stall. | 561 | * otherwise socket can stall. |
@@ -597,7 +597,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned | |||
597 | 597 | ||
598 | /* Function to create two new TCP segments. Shrinks the given segment | 598 | /* Function to create two new TCP segments. Shrinks the given segment |
599 | * to the specified size and appends a new segment with the rest of the | 599 | * to the specified size and appends a new segment with the rest of the |
600 | * packet to the list. This won't be called frequently, I hope. | 600 | * packet to the list. This won't be called frequently, I hope. |
601 | * Remember, these are still headerless SKBs at this point. | 601 | * Remember, these are still headerless SKBs at this point. |
602 | */ | 602 | */ |
603 | int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) | 603 | int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) |
@@ -610,7 +610,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss | |||
610 | 610 | ||
611 | BUG_ON(len > skb->len); | 611 | BUG_ON(len > skb->len); |
612 | 612 | ||
613 | clear_all_retrans_hints(tp); | 613 | clear_all_retrans_hints(tp); |
614 | nsize = skb_headlen(skb) - len; | 614 | nsize = skb_headlen(skb) - len; |
615 | if (nsize < 0) | 615 | if (nsize < 0) |
616 | nsize = 0; | 616 | nsize = 0; |
@@ -821,7 +821,7 @@ void tcp_mtup_init(struct sock *sk) | |||
821 | 821 | ||
822 | icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; | 822 | icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; |
823 | icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + | 823 | icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + |
824 | icsk->icsk_af_ops->net_header_len; | 824 | icsk->icsk_af_ops->net_header_len; |
825 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); | 825 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); |
826 | icsk->icsk_mtup.probe_size = 0; | 826 | icsk->icsk_mtup.probe_size = 0; |
827 | } | 827 | } |
@@ -1008,7 +1008,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp) | |||
1008 | */ | 1008 | */ |
1009 | 1009 | ||
1010 | static inline int tcp_nagle_check(const struct tcp_sock *tp, | 1010 | static inline int tcp_nagle_check(const struct tcp_sock *tp, |
1011 | const struct sk_buff *skb, | 1011 | const struct sk_buff *skb, |
1012 | unsigned mss_now, int nonagle) | 1012 | unsigned mss_now, int nonagle) |
1013 | { | 1013 | { |
1014 | return (skb->len < mss_now && | 1014 | return (skb->len < mss_now && |
@@ -1078,7 +1078,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, | |||
1078 | return cwnd_quota; | 1078 | return cwnd_quota; |
1079 | } | 1079 | } |
1080 | 1080 | ||
1081 | static inline int tcp_skb_is_last(const struct sock *sk, | 1081 | static inline int tcp_skb_is_last(const struct sock *sk, |
1082 | const struct sk_buff *skb) | 1082 | const struct sk_buff *skb) |
1083 | { | 1083 | { |
1084 | return skb->next == (struct sk_buff *)&sk->sk_write_queue; | 1084 | return skb->next == (struct sk_buff *)&sk->sk_write_queue; |
@@ -1298,7 +1298,7 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1298 | skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); | 1298 | skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); |
1299 | else | 1299 | else |
1300 | nskb->csum = skb_copy_and_csum_bits(skb, 0, | 1300 | nskb->csum = skb_copy_and_csum_bits(skb, 0, |
1301 | skb_put(nskb, copy), copy, nskb->csum); | 1301 | skb_put(nskb, copy), copy, nskb->csum); |
1302 | 1302 | ||
1303 | if (skb->len <= copy) { | 1303 | if (skb->len <= copy) { |
1304 | /* We've eaten all the data from this skb. | 1304 | /* We've eaten all the data from this skb. |
@@ -1308,7 +1308,7 @@ static int tcp_mtu_probe(struct sock *sk) | |||
1308 | sk_stream_free_skb(sk, skb); | 1308 | sk_stream_free_skb(sk, skb); |
1309 | } else { | 1309 | } else { |
1310 | TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & | 1310 | TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & |
1311 | ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); | 1311 | ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); |
1312 | if (!skb_shinfo(skb)->nr_frags) { | 1312 | if (!skb_shinfo(skb)->nr_frags) { |
1313 | skb_pull(skb, copy); | 1313 | skb_pull(skb, copy); |
1314 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 1314 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
@@ -1501,7 +1501,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) | |||
1501 | 1501 | ||
1502 | /* This function returns the amount that we can raise the | 1502 | /* This function returns the amount that we can raise the |
1503 | * usable window based on the following constraints | 1503 | * usable window based on the following constraints |
1504 | * | 1504 | * |
1505 | * 1. The window can never be shrunk once it is offered (RFC 793) | 1505 | * 1. The window can never be shrunk once it is offered (RFC 793) |
1506 | * 2. We limit memory per socket | 1506 | * 2. We limit memory per socket |
1507 | * | 1507 | * |
@@ -1520,12 +1520,12 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) | |||
1520 | * side SWS prevention criteria. The problem is that under this rule | 1520 | * side SWS prevention criteria. The problem is that under this rule |
1521 | * a stream of single byte packets will cause the right side of the | 1521 | * a stream of single byte packets will cause the right side of the |
1522 | * window to always advance by a single byte. | 1522 | * window to always advance by a single byte. |
1523 | * | 1523 | * |
1524 | * Of course, if the sender implements sender side SWS prevention | 1524 | * Of course, if the sender implements sender side SWS prevention |
1525 | * then this will not be a problem. | 1525 | * then this will not be a problem. |
1526 | * | 1526 | * |
1527 | * BSD seems to make the following compromise: | 1527 | * BSD seems to make the following compromise: |
1528 | * | 1528 | * |
1529 | * If the free space is less than the 1/4 of the maximum | 1529 | * If the free space is less than the 1/4 of the maximum |
1530 | * space available and the free space is less than 1/2 mss, | 1530 | * space available and the free space is less than 1/2 mss, |
1531 | * then set the window to 0. | 1531 | * then set the window to 0. |
@@ -1567,7 +1567,7 @@ u32 __tcp_select_window(struct sock *sk) | |||
1567 | int window; | 1567 | int window; |
1568 | 1568 | ||
1569 | if (mss > full_space) | 1569 | if (mss > full_space) |
1570 | mss = full_space; | 1570 | mss = full_space; |
1571 | 1571 | ||
1572 | if (free_space < full_space/2) { | 1572 | if (free_space < full_space/2) { |
1573 | icsk->icsk_ack.quick = 0; | 1573 | icsk->icsk_ack.quick = 0; |
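The BSD compromise described in the comment above boils down to: advertise nothing while free space is tiny, otherwise advertise in whole-MSS steps so the window's right edge moves in useful increments. A simplified sketch of that policy, not the exact kernel logic (mss must be non-zero):

/* Receiver-side silly window syndrome avoidance, simplified. */
static unsigned int sws_window(unsigned int free_space,
                               unsigned int full_space,
                               unsigned int mss)
{
    if (free_space < full_space / 4 && free_space < mss / 2)
        return 0;                     /* too small to be worth offering */
    return (free_space / mss) * mss;  /* round down to a multiple of MSS */
}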
@@ -1691,9 +1691,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m | |||
1691 | } | 1691 | } |
1692 | 1692 | ||
1693 | /* Do a simple retransmit without using the backoff mechanisms in | 1693 | /* Do a simple retransmit without using the backoff mechanisms in |
1694 | * tcp_timer. This is used for path mtu discovery. | 1694 | * tcp_timer. This is used for path mtu discovery. |
1695 | * The socket is already locked here. | 1695 | * The socket is already locked here. |
1696 | */ | 1696 | */ |
1697 | void tcp_simple_retransmit(struct sock *sk) | 1697 | void tcp_simple_retransmit(struct sock *sk) |
1698 | { | 1698 | { |
1699 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1699 | const struct inet_connection_sock *icsk = inet_csk(sk); |
@@ -1703,7 +1703,7 @@ void tcp_simple_retransmit(struct sock *sk) | |||
1703 | int lost = 0; | 1703 | int lost = 0; |
1704 | 1704 | ||
1705 | sk_stream_for_retrans_queue(skb, sk) { | 1705 | sk_stream_for_retrans_queue(skb, sk) { |
1706 | if (skb->len > mss && | 1706 | if (skb->len > mss && |
1707 | !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { | 1707 | !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { |
1708 | if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { | 1708 | if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { |
1709 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; | 1709 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; |
@@ -1724,7 +1724,7 @@ void tcp_simple_retransmit(struct sock *sk) | |||
1724 | 1724 | ||
1725 | tcp_sync_left_out(tp); | 1725 | tcp_sync_left_out(tp); |
1726 | 1726 | ||
1727 | /* Don't muck with the congestion window here. | 1727 | /* Don't muck with the congestion window here. |
1728 | * Reason is that we do not increase amount of _data_ | 1728 | * Reason is that we do not increase amount of _data_ |
1729 | * in network, but units changed and effective | 1729 | * in network, but units changed and effective |
1730 | * cwnd/ssthresh really reduced now. | 1730 | * cwnd/ssthresh really reduced now. |
@@ -1747,7 +1747,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
1747 | { | 1747 | { |
1748 | struct tcp_sock *tp = tcp_sk(sk); | 1748 | struct tcp_sock *tp = tcp_sk(sk); |
1749 | struct inet_connection_sock *icsk = inet_csk(sk); | 1749 | struct inet_connection_sock *icsk = inet_csk(sk); |
1750 | unsigned int cur_mss = tcp_current_mss(sk, 0); | 1750 | unsigned int cur_mss = tcp_current_mss(sk, 0); |
1751 | int err; | 1751 | int err; |
1752 | 1752 | ||
1753 | /* Inconslusive MTU probe */ | 1753 | /* Inconslusive MTU probe */ |
@@ -1984,10 +1984,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk) | |||
1984 | */ | 1984 | */ |
1985 | void tcp_send_fin(struct sock *sk) | 1985 | void tcp_send_fin(struct sock *sk) |
1986 | { | 1986 | { |
1987 | struct tcp_sock *tp = tcp_sk(sk); | 1987 | struct tcp_sock *tp = tcp_sk(sk); |
1988 | struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); | 1988 | struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); |
1989 | int mss_now; | 1989 | int mss_now; |
1990 | 1990 | ||
1991 | /* Optimization, tack on the FIN if we have a queue of | 1991 | /* Optimization, tack on the FIN if we have a queue of |
1992 | * unsent frames. But be careful about outgoing SACKS | 1992 | * unsent frames. But be careful about outgoing SACKS |
1993 | * and IP options. | 1993 | * and IP options. |
@@ -2146,17 +2146,17 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2146 | th->seq = htonl(TCP_SKB_CB(skb)->seq); | 2146 | th->seq = htonl(TCP_SKB_CB(skb)->seq); |
2147 | th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); | 2147 | th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); |
2148 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ | 2148 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ |
2149 | __u8 rcv_wscale; | 2149 | __u8 rcv_wscale; |
2150 | /* Set this up on the first call only */ | 2150 | /* Set this up on the first call only */ |
2151 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); | 2151 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); |
2152 | /* tcp_full_space because it is guaranteed to be the first packet */ | 2152 | /* tcp_full_space because it is guaranteed to be the first packet */ |
2153 | tcp_select_initial_window(tcp_full_space(sk), | 2153 | tcp_select_initial_window(tcp_full_space(sk), |
2154 | dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), | 2154 | dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), |
2155 | &req->rcv_wnd, | 2155 | &req->rcv_wnd, |
2156 | &req->window_clamp, | 2156 | &req->window_clamp, |
2157 | ireq->wscale_ok, | 2157 | ireq->wscale_ok, |
2158 | &rcv_wscale); | 2158 | &rcv_wscale); |
2159 | ireq->rcv_wscale = rcv_wscale; | 2159 | ireq->rcv_wscale = rcv_wscale; |
2160 | } | 2160 | } |
2161 | 2161 | ||
2162 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ | 2162 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ |
@@ -2192,9 +2192,9 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2192 | return skb; | 2192 | return skb; |
2193 | } | 2193 | } |
2194 | 2194 | ||
2195 | /* | 2195 | /* |
2196 | * Do all connect socket setups that can be done AF independent. | 2196 | * Do all connect socket setups that can be done AF independent. |
2197 | */ | 2197 | */ |
2198 | static void tcp_connect_init(struct sock *sk) | 2198 | static void tcp_connect_init(struct sock *sk) |
2199 | { | 2199 | { |
2200 | struct dst_entry *dst = __sk_dst_get(sk); | 2200 | struct dst_entry *dst = __sk_dst_get(sk); |
@@ -2251,7 +2251,7 @@ static void tcp_connect_init(struct sock *sk) | |||
2251 | 2251 | ||
2252 | /* | 2252 | /* |
2253 | * Build a SYN and send it off. | 2253 | * Build a SYN and send it off. |
2254 | */ | 2254 | */ |
2255 | int tcp_connect(struct sock *sk) | 2255 | int tcp_connect(struct sock *sk) |
2256 | { | 2256 | { |
2257 | struct tcp_sock *tp = tcp_sk(sk); | 2257 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -2409,7 +2409,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) | |||
2409 | 2409 | ||
2410 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ | 2410 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ |
2411 | skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); | 2411 | skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); |
2412 | if (skb == NULL) | 2412 | if (skb == NULL) |
2413 | return -1; | 2413 | return -1; |
2414 | 2414 | ||
2415 | /* Reserve space for headers and set control bits. */ | 2415 | /* Reserve space for headers and set control bits. */ |
@@ -2498,7 +2498,7 @@ void tcp_send_probe0(struct sock *sk) | |||
2498 | if (icsk->icsk_backoff < sysctl_tcp_retries2) | 2498 | if (icsk->icsk_backoff < sysctl_tcp_retries2) |
2499 | icsk->icsk_backoff++; | 2499 | icsk->icsk_backoff++; |
2500 | icsk->icsk_probes_out++; | 2500 | icsk->icsk_probes_out++; |
2501 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, | 2501 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
2502 | min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), | 2502 | min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), |
2503 | TCP_RTO_MAX); | 2503 | TCP_RTO_MAX); |
2504 | } else { | 2504 | } else { |
@@ -2510,7 +2510,7 @@ void tcp_send_probe0(struct sock *sk) | |||
2510 | */ | 2510 | */ |
2511 | if (!icsk->icsk_probes_out) | 2511 | if (!icsk->icsk_probes_out) |
2512 | icsk->icsk_probes_out = 1; | 2512 | icsk->icsk_probes_out = 1; |
2513 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, | 2513 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
2514 | min(icsk->icsk_rto << icsk->icsk_backoff, | 2514 | min(icsk->icsk_rto << icsk->icsk_backoff, |
2515 | TCP_RESOURCE_PROBE_INTERVAL), | 2515 | TCP_RESOURCE_PROBE_INTERVAL), |
2516 | TCP_RTO_MAX); | 2516 | TCP_RTO_MAX); |
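Both probe-timer arms in the hunk above use the same exponential backoff: the base RTO doubled once per unanswered probe, clamped to a ceiling. A sketch of that computation, assuming millisecond units and a 120-second ceiling standing in for TCP_RTO_MAX:

#include <stdint.h>

#define RTO_MAX_MS 120000u   /* assumed stand-in for TCP_RTO_MAX (120 s) */

/* Zero-window probe timeout: rto << backoff, clamped to the maximum. */
static uint32_t probe0_timeout_ms(uint32_t rto_ms, unsigned int backoff)
{
    uint64_t t;

    if (backoff > 31)
        backoff = 31;                 /* keep the shift well-defined */
    t = (uint64_t)rto_ms << backoff;
    return t > RTO_MAX_MS ? RTO_MAX_MS : (uint32_t)t;
}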
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 3355c276b611..a9243cfc1bea 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -69,7 +69,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset) | |||
69 | struct tcp_sock *tp = tcp_sk(sk); | 69 | struct tcp_sock *tp = tcp_sk(sk); |
70 | int orphans = atomic_read(&tcp_orphan_count); | 70 | int orphans = atomic_read(&tcp_orphan_count); |
71 | 71 | ||
72 | /* If peer does not open window for long time, or did not transmit | 72 | /* If peer does not open window for long time, or did not transmit |
73 | * anything for long time, penalize it. */ | 73 | * anything for long time, penalize it. */ |
74 | if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) | 74 | if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) |
75 | orphans <<= 1; | 75 | orphans <<= 1; |
@@ -137,7 +137,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
137 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); | 137 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
138 | } else { | 138 | } else { |
139 | mss = min(sysctl_tcp_base_mss, | 139 | mss = min(sysctl_tcp_base_mss, |
140 | tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2); | 140 | tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2); |
141 | mss = max(mss, 68 - tp->tcp_header_len); | 141 | mss = max(mss, 68 - tp->tcp_header_len); |
142 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); | 142 | icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); |
143 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); | 143 | tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); |
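When MTU probing is enabled, the retransmit-timeout branch above lowers the search floor by halving the current probe MSS, bounded below so a 68-byte minimum IPv4 MTU can still carry the TCP header. A sketch of that clamp with the inputs passed in directly; the parameter names are assumptions for illustration:

/* Fall back to a smaller MSS after repeated timeouts: half the current
 * search-low MSS, capped by the configured base MSS and floored so a
 * 68-byte minimum MTU still fits the TCP header. */
static unsigned int mtu_probe_fallback_mss(unsigned int base_mss,
                                           unsigned int search_low_mss,
                                           unsigned int tcp_header_len)
{
    unsigned int mss = search_low_mss / 2;
    unsigned int floor = 68 > tcp_header_len ? 68 - tcp_header_len : 0;

    if (mss > base_mss)
        mss = base_mss;
    return mss > floor ? mss : floor;
}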
@@ -150,7 +150,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
150 | retry_until = sysctl_tcp_retries2; | 150 | retry_until = sysctl_tcp_retries2; |
151 | if (sock_flag(sk, SOCK_DEAD)) { | 151 | if (sock_flag(sk, SOCK_DEAD)) { |
152 | const int alive = (icsk->icsk_rto < TCP_RTO_MAX); | 152 | const int alive = (icsk->icsk_rto < TCP_RTO_MAX); |
153 | 153 | ||
154 | retry_until = tcp_orphan_retries(sk, alive); | 154 | retry_until = tcp_orphan_retries(sk, alive); |
155 | 155 | ||
156 | if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) | 156 | if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) |
@@ -257,7 +257,7 @@ static void tcp_probe_timer(struct sock *sk) | |||
257 | 257 | ||
258 | if (sock_flag(sk, SOCK_DEAD)) { | 258 | if (sock_flag(sk, SOCK_DEAD)) { |
259 | const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); | 259 | const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); |
260 | 260 | ||
261 | max_probes = tcp_orphan_retries(sk, alive); | 261 | max_probes = tcp_orphan_retries(sk, alive); |
262 | 262 | ||
263 | if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes)) | 263 | if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes)) |
@@ -453,7 +453,7 @@ static void tcp_keepalive_timer (unsigned long data) | |||
453 | /* Only process if socket is not in use. */ | 453 | /* Only process if socket is not in use. */ |
454 | bh_lock_sock(sk); | 454 | bh_lock_sock(sk); |
455 | if (sock_owned_by_user(sk)) { | 455 | if (sock_owned_by_user(sk)) { |
456 | /* Try again later. */ | 456 | /* Try again later. */ |
457 | inet_csk_reset_keepalive_timer (sk, HZ/20); | 457 | inet_csk_reset_keepalive_timer (sk, HZ/20); |
458 | goto out; | 458 | goto out; |
459 | } | 459 | } |
@@ -515,7 +515,7 @@ resched: | |||
515 | inet_csk_reset_keepalive_timer (sk, elapsed); | 515 | inet_csk_reset_keepalive_timer (sk, elapsed); |
516 | goto out; | 516 | goto out; |
517 | 517 | ||
518 | death: | 518 | death: |
519 | tcp_done(sk); | 519 | tcp_done(sk); |
520 | 520 | ||
521 | out: | 521 | out: |
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index ddc4bcc5785e..5c484dceb967 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
@@ -330,9 +330,9 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, | |||
330 | vegas->minRTT = 0x7fffffff; | 330 | vegas->minRTT = 0x7fffffff; |
331 | } | 331 | } |
332 | /* Use normal slow start */ | 332 | /* Use normal slow start */ |
333 | else if (tp->snd_cwnd <= tp->snd_ssthresh) | 333 | else if (tp->snd_cwnd <= tp->snd_ssthresh) |
334 | tcp_slow_start(tp); | 334 | tcp_slow_start(tp); |
335 | 335 | ||
336 | } | 336 | } |
337 | 337 | ||
338 | /* Extract info for Tcp socket info provided via netlink. */ | 338 | /* Extract info for Tcp socket info provided via netlink. */ |
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index 4f42a86c77f3..4e1b61032a9c 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c | |||
@@ -63,10 +63,10 @@ static void tcp_westwood_init(struct sock *sk) | |||
63 | struct westwood *w = inet_csk_ca(sk); | 63 | struct westwood *w = inet_csk_ca(sk); |
64 | 64 | ||
65 | w->bk = 0; | 65 | w->bk = 0; |
66 | w->bw_ns_est = 0; | 66 | w->bw_ns_est = 0; |
67 | w->bw_est = 0; | 67 | w->bw_est = 0; |
68 | w->accounted = 0; | 68 | w->accounted = 0; |
69 | w->cumul_ack = 0; | 69 | w->cumul_ack = 0; |
70 | w->reset_rtt_min = 1; | 70 | w->reset_rtt_min = 1; |
71 | w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT; | 71 | w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT; |
72 | w->rtt_win_sx = tcp_time_stamp; | 72 | w->rtt_win_sx = tcp_time_stamp; |
@@ -121,7 +121,7 @@ static void westwood_update_window(struct sock *sk) | |||
121 | * to fix mismatch between tp->snd_una and w->snd_una for the first | 121 | * to fix mismatch between tp->snd_una and w->snd_una for the first |
122 | * bandwidth sample | 122 | * bandwidth sample |
123 | */ | 123 | */ |
124 | if (w->first_ack) { | 124 | if (w->first_ack) { |
125 | w->snd_una = tcp_sk(sk)->snd_una; | 125 | w->snd_una = tcp_sk(sk)->snd_una; |
126 | w->first_ack = 0; | 126 | w->first_ack = 0; |
127 | } | 127 | } |
@@ -147,7 +147,7 @@ static inline void update_rtt_min(struct westwood *w) | |||
147 | { | 147 | { |
148 | if (w->reset_rtt_min) { | 148 | if (w->reset_rtt_min) { |
149 | w->rtt_min = w->rtt; | 149 | w->rtt_min = w->rtt; |
150 | w->reset_rtt_min = 0; | 150 | w->reset_rtt_min = 0; |
151 | } else | 151 | } else |
152 | w->rtt_min = min(w->rtt, w->rtt_min); | 152 | w->rtt_min = min(w->rtt, w->rtt_min); |
153 | } | 153 | } |
@@ -183,15 +183,15 @@ static inline u32 westwood_acked_count(struct sock *sk) | |||
183 | 183 | ||
184 | w->cumul_ack = tp->snd_una - w->snd_una; | 184 | w->cumul_ack = tp->snd_una - w->snd_una; |
185 | 185 | ||
186 | /* If cumul_ack is 0 this is a dupack since it's not moving | 186 | /* If cumul_ack is 0 this is a dupack since it's not moving |
187 | * tp->snd_una. | 187 | * tp->snd_una. |
188 | */ | 188 | */ |
189 | if (!w->cumul_ack) { | 189 | if (!w->cumul_ack) { |
190 | w->accounted += tp->mss_cache; | 190 | w->accounted += tp->mss_cache; |
191 | w->cumul_ack = tp->mss_cache; | 191 | w->cumul_ack = tp->mss_cache; |
192 | } | 192 | } |
193 | 193 | ||
194 | if (w->cumul_ack > tp->mss_cache) { | 194 | if (w->cumul_ack > tp->mss_cache) { |
195 | /* Partial or delayed ack */ | 195 | /* Partial or delayed ack */ |
196 | if (w->accounted >= w->cumul_ack) { | 196 | if (w->accounted >= w->cumul_ack) { |
197 | w->accounted -= w->cumul_ack; | 197 | w->accounted -= w->cumul_ack; |
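The hunk above is the heart of Westwood's "how many bytes did this ACK really cover" accounting: a duplicate ACK is credited one MSS, and those credits are subtracted from a later cumulative ACK so the same data is not counted twice in the bandwidth estimate. A condensed sketch of that bookkeeping, with state and names simplified from the kernel structures:

#include <stdint.h>

struct westwood_acct {
    uint32_t snd_una;     /* snd_una at the previous sample       */
    uint32_t accounted;   /* bytes already credited on dupacks    */
    uint32_t mss;
};

static uint32_t westwood_acked(struct westwood_acct *w, uint32_t snd_una_now)
{
    uint32_t cumul_ack = snd_una_now - w->snd_una;

    if (cumul_ack == 0) {               /* dupack: no new data acked    */
        w->accounted += w->mss;
        cumul_ack = w->mss;
    } else if (cumul_ack > w->mss) {    /* partial or delayed ack       */
        if (w->accounted >= cumul_ack) {
            w->accounted -= cumul_ack;
            cumul_ack = w->mss;
        } else {
            cumul_ack -= w->accounted;
            w->accounted = 0;
        }
    }
    w->snd_una = snd_una_now;
    return cumul_ack;
}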
@@ -237,7 +237,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) | |||
237 | 237 | ||
238 | case CA_EVENT_FRTO: | 238 | case CA_EVENT_FRTO: |
239 | tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); | 239 | tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); |
240 | /* Update RTT_min when next ack arrives */ | 240 | /* Update RTT_min when next ack arrives */ |
241 | w->reset_rtt_min = 1; | 241 | w->reset_rtt_min = 1; |
242 | break; | 242 | break; |
243 | 243 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8b54c68a0d12..2a246de6a671 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -20,8 +20,8 @@ | |||
20 | * for udp at least is 'valid'. | 20 | * for udp at least is 'valid'. |
21 | * Alan Cox : Fixed icmp handling properly | 21 | * Alan Cox : Fixed icmp handling properly |
22 | * Alan Cox : Correct error for oversized datagrams | 22 | * Alan Cox : Correct error for oversized datagrams |
23 | * Alan Cox : Tidied select() semantics. | 23 | * Alan Cox : Tidied select() semantics. |
24 | * Alan Cox : udp_err() fixed properly, also now | 24 | * Alan Cox : udp_err() fixed properly, also now |
25 | * select and read wake correctly on errors | 25 | * select and read wake correctly on errors |
26 | * Alan Cox : udp_send verify_area moved to avoid mem leak | 26 | * Alan Cox : udp_send verify_area moved to avoid mem leak |
27 | * Alan Cox : UDP can count its memory | 27 | * Alan Cox : UDP can count its memory |
@@ -56,7 +56,7 @@ | |||
56 | * does have a high hit rate. | 56 | * does have a high hit rate. |
57 | * Olaf Kirch : Don't linearise iovec on sendmsg. | 57 | * Olaf Kirch : Don't linearise iovec on sendmsg. |
58 | * Andi Kleen : Some cleanups, cache destination entry | 58 | * Andi Kleen : Some cleanups, cache destination entry |
59 | * for connect. | 59 | * for connect. |
60 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. | 60 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. |
61 | * Melvin Smith : Check msg_name not msg_namelen in sendto(), | 61 | * Melvin Smith : Check msg_name not msg_namelen in sendto(), |
62 | * return ENOTCONN for unconnected sockets (POSIX) | 62 | * return ENOTCONN for unconnected sockets (POSIX) |
@@ -77,7 +77,7 @@ | |||
77 | * as published by the Free Software Foundation; either version | 77 | * as published by the Free Software Foundation; either version |
78 | * 2 of the License, or (at your option) any later version. | 78 | * 2 of the License, or (at your option) any later version. |
79 | */ | 79 | */ |
80 | 80 | ||
81 | #include <asm/system.h> | 81 | #include <asm/system.h> |
82 | #include <asm/uaccess.h> | 82 | #include <asm/uaccess.h> |
83 | #include <asm/ioctls.h> | 83 | #include <asm/ioctls.h> |
@@ -306,17 +306,17 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk, | |||
306 | if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) | 306 | if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) |
307 | continue; | 307 | continue; |
308 | goto found; | 308 | goto found; |
309 | } | 309 | } |
310 | s = NULL; | 310 | s = NULL; |
311 | found: | 311 | found: |
312 | return s; | 312 | return s; |
313 | } | 313 | } |
314 | 314 | ||
315 | /* | 315 | /* |
316 | * This routine is called by the ICMP module when it gets some | 316 | * This routine is called by the ICMP module when it gets some |
317 | * sort of error condition. If err < 0 then the socket should | 317 | * sort of error condition. If err < 0 then the socket should |
318 | * be closed and the error returned to the user. If err > 0 | 318 | * be closed and the error returned to the user. If err > 0 |
319 | * it's just the icmp type << 8 | icmp code. | 319 | * it's just the icmp type << 8 | icmp code. |
320 | * Header points to the ip header of the error packet. We move | 320 | * Header points to the ip header of the error packet. We move |
321 | * on past this. Then (as it used to claim before adjustment) | 321 | * on past this. Then (as it used to claim before adjustment) |
322 | * header points to the first 8 bytes of the udp header. We need | 322 | * header points to the first 8 bytes of the udp header. We need |
@@ -338,7 +338,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) | |||
338 | skb->dev->ifindex, udptable ); | 338 | skb->dev->ifindex, udptable ); |
339 | if (sk == NULL) { | 339 | if (sk == NULL) { |
340 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | 340 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); |
341 | return; /* No socket for error */ | 341 | return; /* No socket for error */ |
342 | } | 342 | } |
343 | 343 | ||
344 | err = 0; | 344 | err = 0; |
@@ -374,7 +374,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct hlist_head udptable[]) | |||
374 | } | 374 | } |
375 | 375 | ||
376 | /* | 376 | /* |
377 | * RFC1122: OK. Passes ICMP errors back to application, as per | 377 | * RFC1122: OK. Passes ICMP errors back to application, as per |
378 | * 4.1.3.3. | 378 | * 4.1.3.3. |
379 | */ | 379 | */ |
380 | if (!inet->recverr) { | 380 | if (!inet->recverr) { |
@@ -524,7 +524,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
524 | if (len > 0xFFFF) | 524 | if (len > 0xFFFF) |
525 | return -EMSGSIZE; | 525 | return -EMSGSIZE; |
526 | 526 | ||
527 | /* | 527 | /* |
528 | * Check the flags. | 528 | * Check the flags. |
529 | */ | 529 | */ |
530 | 530 | ||
@@ -536,7 +536,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
536 | if (up->pending) { | 536 | if (up->pending) { |
537 | /* | 537 | /* |
538 | * There are pending frames. | 538 | * There are pending frames. |
539 | * The socket lock must be held while it's corked. | 539 | * The socket lock must be held while it's corked. |
540 | */ | 540 | */ |
541 | lock_sock(sk); | 541 | lock_sock(sk); |
542 | if (likely(up->pending)) { | 542 | if (likely(up->pending)) { |
@@ -544,14 +544,14 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
544 | release_sock(sk); | 544 | release_sock(sk); |
545 | return -EINVAL; | 545 | return -EINVAL; |
546 | } | 546 | } |
547 | goto do_append_data; | 547 | goto do_append_data; |
548 | } | 548 | } |
549 | release_sock(sk); | 549 | release_sock(sk); |
550 | } | 550 | } |
551 | ulen += sizeof(struct udphdr); | 551 | ulen += sizeof(struct udphdr); |
552 | 552 | ||
553 | /* | 553 | /* |
554 | * Get and verify the address. | 554 | * Get and verify the address. |
555 | */ | 555 | */ |
556 | if (msg->msg_name) { | 556 | if (msg->msg_name) { |
557 | struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name; | 557 | struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name; |
@@ -575,7 +575,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
575 | Route will not be used, if at least one option is set. | 575 | Route will not be used, if at least one option is set. |
576 | */ | 576 | */ |
577 | connected = 1; | 577 | connected = 1; |
578 | } | 578 | } |
579 | ipc.addr = inet->saddr; | 579 | ipc.addr = inet->saddr; |
580 | 580 | ||
581 | ipc.oif = sk->sk_bound_dev_if; | 581 | ipc.oif = sk->sk_bound_dev_if; |
@@ -601,7 +601,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
601 | } | 601 | } |
602 | tos = RT_TOS(inet->tos); | 602 | tos = RT_TOS(inet->tos); |
603 | if (sock_flag(sk, SOCK_LOCALROUTE) || | 603 | if (sock_flag(sk, SOCK_LOCALROUTE) || |
604 | (msg->msg_flags & MSG_DONTROUTE) || | 604 | (msg->msg_flags & MSG_DONTROUTE) || |
605 | (ipc.opt && ipc.opt->is_strictroute)) { | 605 | (ipc.opt && ipc.opt->is_strictroute)) { |
606 | tos |= RTO_ONLINK; | 606 | tos |= RTO_ONLINK; |
607 | connected = 0; | 607 | connected = 0; |
@@ -761,10 +761,10 @@ out: | |||
761 | /* | 761 | /* |
762 | * IOCTL requests applicable to the UDP protocol | 762 | * IOCTL requests applicable to the UDP protocol |
763 | */ | 763 | */ |
764 | 764 | ||
765 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | 765 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
766 | { | 766 | { |
767 | switch(cmd) | 767 | switch(cmd) |
768 | { | 768 | { |
769 | case SIOCOUTQ: | 769 | case SIOCOUTQ: |
770 | { | 770 | { |
@@ -804,11 +804,11 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
804 | */ | 804 | */ |
805 | 805 | ||
806 | int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | 806 | int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
807 | size_t len, int noblock, int flags, int *addr_len) | 807 | size_t len, int noblock, int flags, int *addr_len) |
808 | { | 808 | { |
809 | struct inet_sock *inet = inet_sk(sk); | 809 | struct inet_sock *inet = inet_sk(sk); |
810 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | 810 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; |
811 | struct sk_buff *skb; | 811 | struct sk_buff *skb; |
812 | int copied, err, copy_only, is_udplite = IS_UDPLITE(sk); | 812 | int copied, err, copy_only, is_udplite = IS_UDPLITE(sk); |
813 | 813 | ||
814 | /* | 814 | /* |
@@ -824,8 +824,8 @@ try_again: | |||
824 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 824 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
825 | if (!skb) | 825 | if (!skb) |
826 | goto out; | 826 | goto out; |
827 | 827 | ||
828 | copied = skb->len - sizeof(struct udphdr); | 828 | copied = skb->len - sizeof(struct udphdr); |
829 | if (copied > len) { | 829 | if (copied > len) { |
830 | copied = len; | 830 | copied = len; |
831 | msg->msg_flags |= MSG_TRUNC; | 831 | msg->msg_flags |= MSG_TRUNC; |
@@ -868,18 +868,18 @@ try_again: | |||
868 | sin->sin_port = skb->h.uh->source; | 868 | sin->sin_port = skb->h.uh->source; |
869 | sin->sin_addr.s_addr = skb->nh.iph->saddr; | 869 | sin->sin_addr.s_addr = skb->nh.iph->saddr; |
870 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | 870 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
871 | } | 871 | } |
872 | if (inet->cmsg_flags) | 872 | if (inet->cmsg_flags) |
873 | ip_cmsg_recv(msg, skb); | 873 | ip_cmsg_recv(msg, skb); |
874 | 874 | ||
875 | err = copied; | 875 | err = copied; |
876 | if (flags & MSG_TRUNC) | 876 | if (flags & MSG_TRUNC) |
877 | err = skb->len - sizeof(struct udphdr); | 877 | err = skb->len - sizeof(struct udphdr); |
878 | 878 | ||
879 | out_free: | 879 | out_free: |
880 | skb_free_datagram(sk, skb); | 880 | skb_free_datagram(sk, skb); |
881 | out: | 881 | out: |
882 | return err; | 882 | return err; |
883 | 883 | ||
884 | csum_copy_err: | 884 | csum_copy_err: |
885 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); | 885 | UDP_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite); |
@@ -887,7 +887,7 @@ csum_copy_err: | |||
887 | skb_kill_datagram(sk, skb, flags); | 887 | skb_kill_datagram(sk, skb, flags); |
888 | 888 | ||
889 | if (noblock) | 889 | if (noblock) |
890 | return -EAGAIN; | 890 | return -EAGAIN; |
891 | goto try_again; | 891 | goto try_again; |
892 | } | 892 | } |
893 | 893 | ||
@@ -898,7 +898,7 @@ int udp_disconnect(struct sock *sk, int flags) | |||
898 | /* | 898 | /* |
899 | * 1003.1g - break association. | 899 | * 1003.1g - break association. |
900 | */ | 900 | */ |
901 | 901 | ||
902 | sk->sk_state = TCP_CLOSE; | 902 | sk->sk_state = TCP_CLOSE; |
903 | inet->daddr = 0; | 903 | inet->daddr = 0; |
904 | inet->dport = 0; | 904 | inet->dport = 0; |
@@ -922,13 +922,13 @@ int udp_disconnect(struct sock *sk, int flags) | |||
922 | static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) | 922 | static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) |
923 | { | 923 | { |
924 | #ifndef CONFIG_XFRM | 924 | #ifndef CONFIG_XFRM |
925 | return 1; | 925 | return 1; |
926 | #else | 926 | #else |
927 | struct udp_sock *up = udp_sk(sk); | 927 | struct udp_sock *up = udp_sk(sk); |
928 | struct udphdr *uh; | 928 | struct udphdr *uh; |
929 | struct iphdr *iph; | 929 | struct iphdr *iph; |
930 | int iphlen, len; | 930 | int iphlen, len; |
931 | 931 | ||
932 | __u8 *udpdata; | 932 | __u8 *udpdata; |
933 | __be32 *udpdata32; | 933 | __be32 *udpdata32; |
934 | __u16 encap_type = up->encap_type; | 934 | __u16 encap_type = up->encap_type; |
@@ -971,7 +971,7 @@ static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) | |||
971 | return 0; | 971 | return 0; |
972 | } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && | 972 | } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && |
973 | udpdata32[0] == 0 && udpdata32[1] == 0) { | 973 | udpdata32[0] == 0 && udpdata32[1] == 0) { |
974 | 974 | ||
975 | /* ESP Packet with Non-IKE marker */ | 975 | /* ESP Packet with Non-IKE marker */ |
976 | len = sizeof(struct udphdr) + 2 * sizeof(u32); | 976 | len = sizeof(struct udphdr) + 2 * sizeof(u32); |
977 | } else | 977 | } else |
@@ -1187,14 +1187,14 @@ static inline void udp4_csum_init(struct sk_buff *skb, struct udphdr *uh) | |||
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | /* | 1189 | /* |
1190 | * All we need to do is get the socket, and then do a checksum. | 1190 | * All we need to do is get the socket, and then do a checksum. |
1191 | */ | 1191 | */ |
1192 | 1192 | ||
1193 | int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], | 1193 | int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[], |
1194 | int is_udplite) | 1194 | int is_udplite) |
1195 | { | 1195 | { |
1196 | struct sock *sk; | 1196 | struct sock *sk; |
1197 | struct udphdr *uh = skb->h.uh; | 1197 | struct udphdr *uh = skb->h.uh; |
1198 | unsigned short ulen; | 1198 | unsigned short ulen; |
1199 | struct rtable *rt = (struct rtable*)skb->dst; | 1199 | struct rtable *rt = (struct rtable*)skb->dst; |
1200 | __be32 saddr = skb->nh.iph->saddr; | 1200 | __be32 saddr = skb->nh.iph->saddr; |
@@ -1270,9 +1270,9 @@ short_packet: | |||
1270 | goto drop; | 1270 | goto drop; |
1271 | 1271 | ||
1272 | csum_error: | 1272 | csum_error: |
1273 | /* | 1273 | /* |
1274 | * RFC1122: OK. Discards the bad packet silently (as far as | 1274 | * RFC1122: OK. Discards the bad packet silently (as far as |
1275 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). | 1275 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). |
1276 | */ | 1276 | */ |
1277 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", | 1277 | LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", |
1278 | is_udplite? "-Lite" : "", | 1278 | is_udplite? "-Lite" : "", |
@@ -1328,7 +1328,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1328 | release_sock(sk); | 1328 | release_sock(sk); |
1329 | } | 1329 | } |
1330 | break; | 1330 | break; |
1331 | 1331 | ||
1332 | case UDP_ENCAP: | 1332 | case UDP_ENCAP: |
1333 | switch (val) { | 1333 | switch (val) { |
1334 | case 0: | 1334 | case 0: |
@@ -1356,8 +1356,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1356 | up->pcflag |= UDPLITE_SEND_CC; | 1356 | up->pcflag |= UDPLITE_SEND_CC; |
1357 | break; | 1357 | break; |
1358 | 1358 | ||
1359 | /* The receiver specifies a minimum checksum coverage value. To make | 1359 | /* The receiver specifies a minimum checksum coverage value. To make |
1360 | * sense, this should be set to at least 8 (as done below). If zero is | 1360 | * sense, this should be set to at least 8 (as done below). If zero is |
1361 | * used, this again means full checksum coverage. */ | 1361 | * used, this again means full checksum coverage. */ |
1362 | case UDPLITE_RECV_CSCOV: | 1362 | case UDPLITE_RECV_CSCOV: |
1363 | if (!up->pcflag) /* Disable the option on UDP sockets */ | 1363 | if (!up->pcflag) /* Disable the option on UDP sockets */ |
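The branch above handles the UDP-Lite partial-checksum options; per the comment, a receive coverage below 8 is bumped up so the UDP-Lite header itself is always covered, and 0 means full coverage. From userspace this is reached through UDPLITE_SEND_CSCOV / UDPLITE_RECV_CSCOV; a sketch assuming Linux UDP-Lite support, with fallback constant definitions in case the libc headers lack them:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE 136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV 10    /* values as in the kernel's linux/udp.h */
#endif
#ifndef UDPLITE_RECV_CSCOV
#define UDPLITE_RECV_CSCOV 11
#endif

int main(void)
{
    int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
    int snd_cov = 20;   /* checksum only the first 20 bytes we send     */
    int rcv_cov = 20;   /* require senders to cover at least 20 bytes   */

    if (s < 0) { perror("socket"); return 1; }
    if (setsockopt(s, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
                   &snd_cov, sizeof(snd_cov)) < 0)
        perror("UDPLITE_SEND_CSCOV");
    if (setsockopt(s, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
                   &rcv_cov, sizeof(rcv_cov)) < 0)
        perror("UDPLITE_RECV_CSCOV");
    return 0;
}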
@@ -1406,7 +1406,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, | |||
1406 | return -EFAULT; | 1406 | return -EFAULT; |
1407 | 1407 | ||
1408 | len = min_t(unsigned int, len, sizeof(int)); | 1408 | len = min_t(unsigned int, len, sizeof(int)); |
1409 | 1409 | ||
1410 | if(len < 0) | 1410 | if(len < 0) |
1411 | return -EINVAL; | 1411 | return -EINVAL; |
1412 | 1412 | ||
@@ -1433,11 +1433,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, | |||
1433 | return -ENOPROTOOPT; | 1433 | return -ENOPROTOOPT; |
1434 | }; | 1434 | }; |
1435 | 1435 | ||
1436 | if(put_user(len, optlen)) | 1436 | if(put_user(len, optlen)) |
1437 | return -EFAULT; | 1437 | return -EFAULT; |
1438 | if(copy_to_user(optval, &val,len)) | 1438 | if(copy_to_user(optval, &val,len)) |
1439 | return -EFAULT; | 1439 | return -EFAULT; |
1440 | return 0; | 1440 | return 0; |
1441 | } | 1441 | } |
1442 | 1442 | ||
1443 | int udp_getsockopt(struct sock *sk, int level, int optname, | 1443 | int udp_getsockopt(struct sock *sk, int level, int optname, |
@@ -1463,7 +1463,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname, | |||
1463 | * @sock - socket | 1463 | * @sock - socket |
1464 | * @wait - poll table | 1464 | * @wait - poll table |
1465 | * | 1465 | * |
1466 | * This is same as datagram poll, except for the special case of | 1466 | * This is same as datagram poll, except for the special case of |
1467 | * blocking sockets. If application is using a blocking fd | 1467 | * blocking sockets. If application is using a blocking fd |
1468 | * and a packet with checksum error is in the queue; | 1468 | * and a packet with checksum error is in the queue; |
1469 | * then it could get return from select indicating data available | 1469 | * then it could get return from select indicating data available |
@@ -1502,11 +1502,11 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
1502 | } | 1502 | } |
1503 | 1503 | ||
1504 | return mask; | 1504 | return mask; |
1505 | 1505 | ||
1506 | } | 1506 | } |
1507 | 1507 | ||
1508 | struct proto udp_prot = { | 1508 | struct proto udp_prot = { |
1509 | .name = "UDP", | 1509 | .name = "UDP", |
1510 | .owner = THIS_MODULE, | 1510 | .owner = THIS_MODULE, |
1511 | .close = udp_lib_close, | 1511 | .close = udp_lib_close, |
1512 | .connect = ip4_datagram_connect, | 1512 | .connect = ip4_datagram_connect, |
@@ -1670,7 +1670,7 @@ static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket) | |||
1670 | 1670 | ||
1671 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" | 1671 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" |
1672 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", | 1672 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", |
1673 | bucket, src, srcp, dest, destp, sp->sk_state, | 1673 | bucket, src, srcp, dest, destp, sp->sk_state, |
1674 | atomic_read(&sp->sk_wmem_alloc), | 1674 | atomic_read(&sp->sk_wmem_alloc), |
1675 | atomic_read(&sp->sk_rmem_alloc), | 1675 | atomic_read(&sp->sk_rmem_alloc), |
1676 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | 1676 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), |
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index f6f4277ba6dc..820a477cfaa6 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
@@ -10,7 +10,7 @@ extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); | |||
10 | 10 | ||
11 | extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, | 11 | extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, |
12 | struct hlist_head udptable[], int *port_rover, | 12 | struct hlist_head udptable[], int *port_rover, |
13 | int (*)(const struct sock*,const struct sock*)); | 13 | int (*)(const struct sock*,const struct sock*)); |
14 | extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); | 14 | extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); |
15 | 15 | ||
16 | 16 | ||
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 8655d038364c..289146bdb8b0 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Split up af-specific portion | 6 | * Split up af-specific portion |
7 | * Derek Atkins <derek@ihtfp.com> | 7 | * Derek Atkins <derek@ihtfp.com> |
8 | * Add Encapsulation support | 8 | * Add Encapsulation support |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -42,7 +42,7 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) | |||
42 | 42 | ||
43 | if (skb->dst == NULL) { | 43 | if (skb->dst == NULL) { |
44 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 44 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
45 | skb->dev)) | 45 | skb->dev)) |
46 | goto drop; | 46 | goto drop; |
47 | } | 47 | } |
48 | return dst_input(skb); | 48 | return dst_input(skb); |
@@ -149,7 +149,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) | |||
149 | ip_send_check(skb->nh.iph); | 149 | ip_send_check(skb->nh.iph); |
150 | 150 | ||
151 | NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL, | 151 | NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL, |
152 | xfrm4_rcv_encap_finish); | 152 | xfrm4_rcv_encap_finish); |
153 | return 0; | 153 | return 0; |
154 | #else | 154 | #else |
155 | return -skb->nh.iph->protocol; | 155 | return -skb->nh.iph->protocol; |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 04403fb01a58..038ca160fe2c 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * xfrm4_output.c - Common IPsec encapsulation code for IPv4. | 2 | * xfrm4_output.c - Common IPsec encapsulation code for IPv4. |
3 | * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au> | 3 | * Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License | 6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version | 7 | * as published by the Free Software Foundation; either version |
@@ -28,7 +28,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
28 | goto out; | 28 | goto out; |
29 | 29 | ||
30 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; | 30 | IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; |
31 | 31 | ||
32 | if (!(iph->frag_off & htons(IP_DF)) || skb->local_df) | 32 | if (!(iph->frag_off & htons(IP_DF)) || skb->local_df) |
33 | goto out; | 33 | goto out; |
34 | 34 | ||
@@ -47,7 +47,7 @@ static int xfrm4_output_one(struct sk_buff *skb) | |||
47 | struct dst_entry *dst = skb->dst; | 47 | struct dst_entry *dst = skb->dst; |
48 | struct xfrm_state *x = dst->xfrm; | 48 | struct xfrm_state *x = dst->xfrm; |
49 | int err; | 49 | int err; |
50 | 50 | ||
51 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 51 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
52 | err = skb_checksum_help(skb); | 52 | err = skb_checksum_help(skb); |
53 | if (err) | 53 | if (err) |
@@ -78,7 +78,7 @@ static int xfrm4_output_one(struct sk_buff *skb) | |||
78 | x->curlft.packets++; | 78 | x->curlft.packets++; |
79 | 79 | ||
80 | spin_unlock_bh(&x->lock); | 80 | spin_unlock_bh(&x->lock); |
81 | 81 | ||
82 | if (!(skb->dst = dst_pop(dst))) { | 82 | if (!(skb->dst = dst_pop(dst))) { |
83 | err = -EHOSTUNREACH; | 83 | err = -EHOSTUNREACH; |
84 | goto error_nolock; | 84 | goto error_nolock; |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 699f27ce62ad..fef19c6bcb98 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * xfrm4_policy.c | 2 | * xfrm4_policy.c |
3 | * | 3 | * |
4 | * Changes: | 4 | * Changes: |
5 | * Kazunori MIYAZAWA @USAGI | 5 | * Kazunori MIYAZAWA @USAGI |
6 | * YOSHIFUJI Hideaki @USAGI | 6 | * YOSHIFUJI Hideaki @USAGI |
7 | * Split up af-specific portion | 7 | * Split up af-specific portion |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
@@ -50,8 +50,8 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy) | |||
50 | struct xfrm_dst *xdst = (struct xfrm_dst*)dst; | 50 | struct xfrm_dst *xdst = (struct xfrm_dst*)dst; |
51 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ | 51 | if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/ |
52 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && | 52 | xdst->u.rt.fl.fl4_dst == fl->fl4_dst && |
53 | xdst->u.rt.fl.fl4_src == fl->fl4_src && | 53 | xdst->u.rt.fl.fl4_src == fl->fl4_src && |
54 | xdst->u.rt.fl.fl4_tos == fl->fl4_tos && | 54 | xdst->u.rt.fl.fl4_tos == fl->fl4_tos && |
55 | xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { | 55 | xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { |
56 | dst_clone(dst); | 56 | dst_clone(dst); |
57 | break; | 57 | break; |
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index f110af5b1319..1be6762b2d47 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c | |||
@@ -13,7 +13,7 @@ | |||
13 | static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) | 13 | static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) |
14 | { | 14 | { |
15 | struct iphdr *iph; | 15 | struct iphdr *iph; |
16 | 16 | ||
17 | iph = skb->nh.iph; | 17 | iph = skb->nh.iph; |
18 | iph->tot_len = htons(skb->len); | 18 | iph->tot_len = htons(skb->len); |
19 | ip_send_check(iph); | 19 | ip_send_check(iph); |